baserlib: Move GetClient/SubmitJob into base class
[ganeti-local] / lib / rapi / rlib2.py
index 2aaa774..d6a29c7 100644 (file)
 # 02110-1301, USA.
 
 
-"""Remote API version 2 baserlib.library.
+"""Remote API resource implementations.
 
-  PUT or POST?
-  ============
+PUT or POST?
+============
 
-  According to RFC2616 the main difference between PUT and POST is that
-  POST can create new resources but PUT can only create the resource the
-  URI was pointing to on the PUT request.
+According to RFC2616 the main difference between PUT and POST is that
+POST can create new resources but PUT can only create the resource the
+URI was pointing to on the PUT request.
 
-  To be in context of this module for instance creation POST on
-  /2/instances is legitim while PUT would be not, due to it does create a
-  new entity and not just replace /2/instances with it.
+In the context of this module POST on ``/2/instances`` to change an existing
+entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
+new instance) with a name specified in the request.
 
-  So when adding new methods, if they are operating on the URI entity itself,
-  PUT should be prefered over POST.
+Quoting from RFC2616, section 9.6::
+
+  The fundamental difference between the POST and PUT requests is reflected in
+  the different meaning of the Request-URI. The URI in a POST request
+  identifies the resource that will handle the enclosed entity. That resource
+  might be a data-accepting process, a gateway to some other protocol, or a
+  separate entity that accepts annotations. In contrast, the URI in a PUT
+  request identifies the entity enclosed with the request -- the user agent
+  knows what URI is intended and the server MUST NOT attempt to apply the
+  request to some other resource. If the server desires that the request be
+  applied to a different URI, it MUST send a 301 (Moved Permanently) response;
+  the user agent MAY then make its own decision regarding whether or not to
+  redirect the request.
+
+So when adding new methods, if they are operating on the URI entity itself,
+PUT should be preferred over POST.
 
 """
 
@@ -47,6 +61,8 @@ from ganeti import constants
 from ganeti import cli
 from ganeti import rapi
 from ganeti import ht
+from ganeti import compat
+from ganeti import ssconf
 from ganeti.rapi import baserlib
 
 
@@ -73,11 +89,23 @@ N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "group.uuid",
             ] + _COMMON_FIELDS
 
-G_FIELDS = ["name", "uuid",
-            "alloc_policy",
-            "node_cnt", "node_list",
-            "ctime", "mtime", "serial_no",
-            ]  # "tags" is missing to be able to use _COMMON_FIELDS here.
+G_FIELDS = [
+  "alloc_policy",
+  "name",
+  "node_cnt",
+  "node_list",
+  ] + _COMMON_FIELDS
+
+J_FIELDS_BULK = [
+  "id", "ops", "status", "summary",
+  "opstatus",
+  "received_ts", "start_ts", "end_ts",
+  ]
+
+J_FIELDS = J_FIELDS_BULK + [
+  "oplog",
+  "opresult",
+  ]
 
 _NR_DRAINED = "drained"
 _NR_MASTER_CANDIATE = "master-candidate"
@@ -86,13 +114,15 @@ _NR_OFFLINE = "offline"
 _NR_REGULAR = "regular"
 
 _NR_MAP = {
-  "M": _NR_MASTER,
-  "C": _NR_MASTER_CANDIATE,
-  "D": _NR_DRAINED,
-  "O": _NR_OFFLINE,
-  "R": _NR_REGULAR,
+  constants.NR_MASTER: _NR_MASTER,
+  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
+  constants.NR_DRAINED: _NR_DRAINED,
+  constants.NR_OFFLINE: _NR_OFFLINE,
+  constants.NR_REGULAR: _NR_REGULAR,
   }
 
+assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
+
 # Request data version field
 _REQ_DATA_VERSION = "__version__"
 
@@ -102,10 +132,35 @@ _INST_CREATE_REQV1 = "instance-create-reqv1"
 # Feature string for instance reinstall request version 1
 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
 
+# Feature string for node migration version 1
+_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+
+# Feature string for node evacuation with LU-generated jobs
+_NODE_EVAC_RES1 = "node-evac-res1"
+
+ALL_FEATURES = frozenset([
+  _INST_CREATE_REQV1,
+  _INST_REINSTALL_REQV1,
+  _NODE_MIGRATE_REQV1,
+  _NODE_EVAC_RES1,
+  ])
+
 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
 _WFJC_TIMEOUT = 10
 
 
+class R_root(baserlib.R_Generic):
+  """/ resource.
+
+  """
+  @staticmethod
+  def GET():
+    """Supported for legacy reasons.
+
+    """
+    return None
+
+
 class R_version(baserlib.R_Generic):
   """/version resource.
 
@@ -125,12 +180,11 @@ class R_2_info(baserlib.R_Generic):
   """/2/info resource.
 
   """
-  @staticmethod
-  def GET():
+  def GET(self):
     """Returns cluster information.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
     return client.QueryClusterInfo()
 
 
@@ -143,15 +197,14 @@ class R_2_features(baserlib.R_Generic):
     """Returns list of optional RAPI features implemented.
 
     """
-    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
+    return list(ALL_FEATURES)
 
 
 class R_2_os(baserlib.R_Generic):
   """/2/os resource.
 
   """
-  @staticmethod
-  def GET():
+  def GET(self):
     """Return a list of all OSes.
 
     Can return error 500 in case of a problem.
@@ -159,9 +212,9 @@ class R_2_os(baserlib.R_Generic):
     Example: ["debian-etch"]
 
     """
-    cl = baserlib.GetClient()
+    cl = self.GetClient()
     op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
-    job_id = baserlib.SubmitJob([op], cl)
+    job_id = self.SubmitJob([op], cl=cl)
     # we use custom feedback function, instead of print we log the status
     result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
     diagnose_data = result[0]
@@ -180,12 +233,11 @@ class R_2_redist_config(baserlib.R_Generic):
   """/2/redistribute-config resource.
 
   """
-  @staticmethod
-  def PUT():
+  def PUT(self):
     """Redistribute configuration to all nodes.
 
     """
-    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
+    return self.SubmitJob([opcodes.OpClusterRedistConf()])
 
 
 class R_2_cluster_modify(baserlib.R_Generic):
@@ -201,26 +253,28 @@ class R_2_cluster_modify(baserlib.R_Generic):
     op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
                              None)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_jobs(baserlib.R_Generic):
   """/2/jobs resource.
 
   """
-  @staticmethod
-  def GET():
+  def GET(self):
     """Returns a dictionary of jobs.
 
     @return: a dictionary with jobs id and uri.
 
     """
-    fields = ["id"]
-    cl = baserlib.GetClient()
-    # Convert the list of lists to the list of ids
-    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
-    return baserlib.BuildUriList(result, "/2/jobs/%s",
-                                 uri_fields=("id", "uri"))
+    client = self.GetClient()
+
+    if self.useBulk():
+      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
+      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
+    else:
+      jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
+      return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
+                                   uri_fields=("id", "uri"))
 
 
 class R_2_jobs_id(baserlib.R_Generic):
@@ -240,22 +294,18 @@ class R_2_jobs_id(baserlib.R_Generic):
             - opresult: OpCodes results as a list of lists
 
     """
-    fields = ["id", "ops", "status", "summary",
-              "opstatus", "opresult", "oplog",
-              "received_ts", "start_ts", "end_ts",
-              ]
     job_id = self.items[0]
-    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
+    result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
     if result is None:
       raise http.HttpNotFound()
-    return baserlib.MapFields(fields, result)
+    return baserlib.MapFields(J_FIELDS, result)
 
   def DELETE(self):
     """Cancel not-yet-started job.
 
     """
     job_id = self.items[0]
-    result = baserlib.GetClient().CancelJob(job_id)
+    result = self.GetClient().CancelJob(job_id)
     return result
 
 
@@ -289,7 +339,7 @@ class R_2_jobs_id_wait(baserlib.R_Generic):
       raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                 " be a number")
 
-    client = baserlib.GetClient()
+    client = self.GetClient()
     result = client.WaitForJobChangeOnce(job_id, fields,
                                          prev_job_info, prev_log_serial,
                                          timeout=_WFJC_TIMEOUT)
@@ -316,7 +366,7 @@ class R_2_nodes(baserlib.R_Generic):
     """Returns a list of all nodes.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
 
     if self.useBulk():
       bulkdata = client.QueryNodes([], N_FIELDS, False)
@@ -337,7 +387,7 @@ class R_2_nodes_name(baserlib.R_Generic):
 
     """
     node_name = self.items[0]
-    client = baserlib.GetClient()
+    client = self.GetClient()
 
     result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                             names=[node_name], fields=N_FIELDS,
@@ -357,7 +407,7 @@ class R_2_nodes_name_role(baserlib.R_Generic):
 
     """
     node_name = self.items[0]
-    client = baserlib.GetClient()
+    client = self.GetClient()
     result = client.QueryNodes(names=[node_name], fields=["role"],
                                use_locking=self.useLocking())
 
@@ -401,7 +451,7 @@ class R_2_nodes_name_role(baserlib.R_Generic):
                                  drained=drained,
                                  force=bool(self.useForce()))
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_nodes_name_evacuate(baserlib.R_Generic):
@@ -409,38 +459,15 @@ class R_2_nodes_name_evacuate(baserlib.R_Generic):
 
   """
   def POST(self):
-    """Evacuate all secondary instances off a node.
+    """Evacuate all instances off a node.
 
     """
-    node_name = self.items[0]
-    remote_node = self._checkStringVariable("remote_node", default=None)
-    iallocator = self._checkStringVariable("iallocator", default=None)
-    early_r = bool(self._checkIntVariable("early_release", default=0))
-    dry_run = bool(self.dryRun())
-
-    cl = baserlib.GetClient()
-
-    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
-                                    iallocator=iallocator,
-                                    remote_node=remote_node)
-
-    job_id = baserlib.SubmitJob([op], cl)
-    # we use custom feedback function, instead of print we log the status
-    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
-
-    jobs = []
-    for iname, node in result:
-      if dry_run:
-        jid = None
-      else:
-        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
-                                            remote_node=node, disks=[],
-                                            mode=constants.REPLACE_DISK_CHG,
-                                            early_release=early_r)
-        jid = baserlib.SubmitJob([op])
-      jobs.append((jid, iname, node))
+    op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
+      "node_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
 
-    return jobs
+    return self.SubmitJob([op])
 
 
 class R_2_nodes_name_migrate(baserlib.R_Generic):
@@ -453,20 +480,31 @@ class R_2_nodes_name_migrate(baserlib.R_Generic):
     """
     node_name = self.items[0]
 
-    if "live" in self.queryargs and "mode" in self.queryargs:
-      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
-                                " be passed")
-    elif "live" in self.queryargs:
-      if self._checkIntVariable("live", default=1):
-        mode = constants.HT_MIGRATION_LIVE
+    if self.queryargs:
+      # Support old-style requests
+      if "live" in self.queryargs and "mode" in self.queryargs:
+        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
+                                  " be passed")
+
+      if "live" in self.queryargs:
+        if self._checkIntVariable("live", default=1):
+          mode = constants.HT_MIGRATION_LIVE
+        else:
+          mode = constants.HT_MIGRATION_NONLIVE
       else:
-        mode = constants.HT_MIGRATION_NONLIVE
+        mode = self._checkStringVariable("mode", default=None)
+
+      data = {
+        "mode": mode,
+        }
     else:
-      mode = self._checkStringVariable("mode", default=None)
+      data = self.request_body
 
-    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)
+    op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
+      "node_name": node_name,
+      })
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_nodes_name_storage(baserlib.R_Generic):
@@ -492,7 +530,7 @@ class R_2_nodes_name_storage(baserlib.R_Generic):
     op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                     storage_type=storage_type,
                                     output_fields=output_fields.split(","))
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_nodes_name_storage_modify(baserlib.R_Generic):
@@ -522,7 +560,7 @@ class R_2_nodes_name_storage_modify(baserlib.R_Generic):
                                      storage_type=storage_type,
                                      name=name,
                                      changes=changes)
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_nodes_name_storage_repair(baserlib.R_Generic):
@@ -545,7 +583,7 @@ class R_2_nodes_name_storage_repair(baserlib.R_Generic):
     op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                      storage_type=storage_type,
                                      name=name)
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 def _ParseCreateGroupRequest(data, dry_run):
@@ -555,12 +593,16 @@ def _ParseCreateGroupRequest(data, dry_run):
   @return: Group creation opcode
 
   """
-  group_name = baserlib.CheckParameter(data, "name")
-  alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
+  override = {
+    "dry_run": dry_run,
+    }
 
-  return opcodes.OpGroupAdd(group_name=group_name,
-                            alloc_policy=alloc_policy,
-                            dry_run=dry_run)
+  rename = {
+    "name": "group_name",
+    }
+
+  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
+                             rename=rename)
 
 
 class R_2_groups(baserlib.R_Generic):
@@ -571,7 +613,7 @@ class R_2_groups(baserlib.R_Generic):
     """Returns a list of all node groups.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
 
     if self.useBulk():
       bulkdata = client.QueryGroups([], G_FIELDS, False)
@@ -590,7 +632,7 @@ class R_2_groups(baserlib.R_Generic):
     """
     baserlib.CheckType(self.request_body, dict, "Body contents")
     op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_groups_name(baserlib.R_Generic):
@@ -602,7 +644,7 @@ class R_2_groups_name(baserlib.R_Generic):
 
     """
     group_name = self.items[0]
-    client = baserlib.GetClient()
+    client = self.GetClient()
 
     result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                             names=[group_name], fields=G_FIELDS,
@@ -617,7 +659,7 @@ class R_2_groups_name(baserlib.R_Generic):
     op = opcodes.OpGroupRemove(group_name=self.items[0],
                                dry_run=bool(self.dryRun()))
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 def _ParseModifyGroupRequest(name, data):
@@ -632,7 +674,6 @@ def _ParseModifyGroupRequest(name, data):
     })
 
 
-
 class R_2_groups_name_modify(baserlib.R_Generic):
   """/2/groups/[group_name]/modify resource.
 
@@ -647,7 +688,7 @@ class R_2_groups_name_modify(baserlib.R_Generic):
 
     op = _ParseModifyGroupRequest(self.items[0], self.request_body)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 def _ParseRenameGroupRequest(name, data, dry_run):
@@ -664,10 +705,10 @@ def _ParseRenameGroupRequest(name, data, dry_run):
   @return: Node group rename opcode
 
   """
-  new_name = baserlib.CheckParameter(data, "new_name")
-
-  return opcodes.OpGroupRename(group_name=name, new_name=new_name,
-                               dry_run=dry_run)
+  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
+    "group_name": name,
+    "dry_run": dry_run,
+    })
 
 
 class R_2_groups_name_rename(baserlib.R_Generic):
@@ -683,7 +724,7 @@ class R_2_groups_name_rename(baserlib.R_Generic):
     baserlib.CheckType(self.request_body, dict, "Body contents")
     op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                   self.dryRun())
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_groups_name_assign_nodes(baserlib.R_Generic):
@@ -702,7 +743,7 @@ class R_2_groups_name_assign_nodes(baserlib.R_Generic):
       "force": self.useForce(),
       })
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 def _ParseInstanceCreateRequestVersion1(data, dry_run):
@@ -733,7 +774,7 @@ class R_2_instances(baserlib.R_Generic):
     """Returns a list of all available instances.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
 
     use_locking = self.useLocking()
     if self.useBulk():
@@ -745,67 +786,6 @@ class R_2_instances(baserlib.R_Generic):
       return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                    uri_fields=("id", "uri"))
 
-  def _ParseVersion0CreateRequest(self):
-    """Parses an instance creation request version 0.
-
-    Request data version 0 is deprecated and should not be used anymore.
-
-    @rtype: L{opcodes.OpInstanceCreate}
-    @return: Instance creation opcode
-
-    """
-    # Do not modify anymore, request data version 0 is deprecated
-    beparams = baserlib.MakeParamsDict(self.request_body,
-                                       constants.BES_PARAMETERS)
-    hvparams = baserlib.MakeParamsDict(self.request_body,
-                                       constants.HVS_PARAMETERS)
-    fn = self.getBodyParameter
-
-    # disk processing
-    disk_data = fn('disks')
-    if not isinstance(disk_data, list):
-      raise http.HttpBadRequest("The 'disks' parameter should be a list")
-    disks = []
-    for idx, d in enumerate(disk_data):
-      if not isinstance(d, int):
-        raise http.HttpBadRequest("Disk %d specification wrong: should"
-                                  " be an integer" % idx)
-      disks.append({"size": d})
-
-    # nic processing (one nic only)
-    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
-    if fn("ip", None) is not None:
-      nics[0]["ip"] = fn("ip")
-    if fn("mode", None) is not None:
-      nics[0]["mode"] = fn("mode")
-    if fn("link", None) is not None:
-      nics[0]["link"] = fn("link")
-    if fn("bridge", None) is not None:
-      nics[0]["bridge"] = fn("bridge")
-
-    # Do not modify anymore, request data version 0 is deprecated
-    return opcodes.OpInstanceCreate(
-      mode=constants.INSTANCE_CREATE,
-      instance_name=fn('name'),
-      disks=disks,
-      disk_template=fn('disk_template'),
-      os_type=fn('os'),
-      pnode=fn('pnode', None),
-      snode=fn('snode', None),
-      iallocator=fn('iallocator', None),
-      nics=nics,
-      start=fn('start', True),
-      ip_check=fn('ip_check', True),
-      name_check=fn('name_check', True),
-      wait_for_sync=True,
-      hypervisor=fn('hypervisor', None),
-      hvparams=hvparams,
-      beparams=beparams,
-      file_storage_dir=fn('file_storage_dir', None),
-      file_driver=fn('file_driver', constants.FD_LOOP),
-      dry_run=bool(self.dryRun()),
-      )
-
   def POST(self):
     """Create an instance.
 
@@ -819,15 +799,18 @@ class R_2_instances(baserlib.R_Generic):
     data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
 
     if data_version == 0:
-      op = self._ParseVersion0CreateRequest()
+      raise http.HttpBadRequest("Instance creation request version 0 is no"
+                                " longer supported")
     elif data_version == 1:
-      op = _ParseInstanceCreateRequestVersion1(self.request_body,
-                                               self.dryRun())
+      data = self.request_body.copy()
+      # Remove "__version__"
+      data.pop(_REQ_DATA_VERSION, None)
+      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
     else:
       raise http.HttpBadRequest("Unsupported request data version %s" %
                                 data_version)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name(baserlib.R_Generic):
@@ -838,7 +821,7 @@ class R_2_instances_name(baserlib.R_Generic):
     """Send information about an instance.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
     instance_name = self.items[0]
 
     result = baserlib.HandleItemQueryErrors(client.QueryInstances,
@@ -855,7 +838,7 @@ class R_2_instances_name(baserlib.R_Generic):
     op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                   ignore_failures=False,
                                   dry_run=bool(self.dryRun()))
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_info(baserlib.R_Generic):
@@ -871,7 +854,7 @@ class R_2_instances_name_info(baserlib.R_Generic):
 
     op = opcodes.OpInstanceQueryData(instances=[instance_name],
                                      static=static)
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_reboot(baserlib.R_Generic):
@@ -888,15 +871,15 @@ class R_2_instances_name_reboot(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    reboot_type = self.queryargs.get('type',
+    reboot_type = self.queryargs.get("type",
                                      [constants.INSTANCE_REBOOT_HARD])[0]
-    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
+    ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
     op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                   reboot_type=reboot_type,
                                   ignore_secondaries=ignore_secondaries,
                                   dry_run=bool(self.dryRun()))
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_startup(baserlib.R_Generic):
@@ -913,12 +896,28 @@ class R_2_instances_name_startup(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    force_startup = bool(self._checkIntVariable('force'))
+    force_startup = bool(self._checkIntVariable("force"))
+    no_remember = bool(self._checkIntVariable("no_remember"))
     op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                    force=force_startup,
-                                   dry_run=bool(self.dryRun()))
+                                   dry_run=bool(self.dryRun()),
+                                   no_remember=no_remember)
+
+    return self.SubmitJob([op])
+
 
-    return baserlib.SubmitJob([op])
+def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
+  """Parses a request for an instance shutdown.
+
+  @rtype: L{opcodes.OpInstanceShutdown}
+  @return: Instance shutdown opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
+    "instance_name": name,
+    "dry_run": dry_run,
+    "no_remember": no_remember,
+    })
 
 
 class R_2_instances_name_shutdown(baserlib.R_Generic):
@@ -930,12 +929,16 @@ class R_2_instances_name_shutdown(baserlib.R_Generic):
   def PUT(self):
     """Shutdown an instance.
 
+    @return: a job id
+
     """
-    instance_name = self.items[0]
-    op = opcodes.OpInstanceShutdown(instance_name=instance_name,
-                                    dry_run=bool(self.dryRun()))
+    baserlib.CheckType(self.request_body, dict, "Body contents")
 
-    return baserlib.SubmitJob([op])
+    no_remember = bool(self._checkIntVariable("no_remember"))
+    op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
+                                       bool(self.dryRun()), no_remember)
+
+    return self.SubmitJob([op])
 
 
 def _ParseInstanceReinstallRequest(name, data):
@@ -945,7 +948,7 @@ def _ParseInstanceReinstallRequest(name, data):
   if not isinstance(data, dict):
     raise http.HttpBadRequest("Invalid body contents, not a dictionary")
 
-  ostype = baserlib.CheckParameter(data, "os")
+  ostype = baserlib.CheckParameter(data, "os", default=None)
   start = baserlib.CheckParameter(data, "start", exptype=bool,
                                   default=True)
   osparams = baserlib.CheckParameter(data, "osparams", default=None)
@@ -981,18 +984,18 @@ class R_2_instances_name_reinstall(baserlib.R_Generic):
         raise http.HttpBadRequest("Can't combine query and body parameters")
 
       body = self.request_body
-    else:
-      if not self.queryargs:
-        raise http.HttpBadRequest("Missing query parameters")
+    elif self.queryargs:
       # Legacy interface, do not modify/extend
       body = {
         "os": self._checkStringVariable("os"),
         "start": not self._checkIntVariable("nostartup"),
         }
+    else:
+      body = {}
 
     ops = _ParseInstanceReinstallRequest(self.items[0], body)
 
-    return baserlib.SubmitJob(ops)
+    return self.SubmitJob(ops)
 
 
 def _ParseInstanceReplaceDisksRequest(name, data):
@@ -1032,7 +1035,7 @@ class R_2_instances_name_replace_disks(baserlib.R_Generic):
     """
     op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_activate_disks(baserlib.R_Generic):
@@ -1046,12 +1049,12 @@ class R_2_instances_name_activate_disks(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    ignore_size = bool(self._checkIntVariable('ignore_size'))
+    ignore_size = bool(self._checkIntVariable("ignore_size"))
 
     op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                          ignore_size=ignore_size)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
@@ -1066,7 +1069,7 @@ class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
 
     op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_prepare_export(baserlib.R_Generic):
@@ -1085,7 +1088,7 @@ class R_2_instances_name_prepare_export(baserlib.R_Generic):
     op = opcodes.OpBackupPrepare(instance_name=instance_name,
                                  mode=mode)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 def _ParseExportInstanceRequest(name, data):
@@ -1121,7 +1124,7 @@ class R_2_instances_name_export(baserlib.R_Generic):
 
     op = _ParseExportInstanceRequest(self.items[0], self.request_body)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 def _ParseMigrateInstanceRequest(name, data):
@@ -1150,7 +1153,26 @@ class R_2_instances_name_migrate(baserlib.R_Generic):
 
     op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
+
+
+class R_2_instances_name_failover(baserlib.R_Generic):
+  """/2/instances/[instance_name]/failover resource.
+
+  """
+  def PUT(self):
+    """Does a failover of an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
+      "instance_name": self.items[0],
+      })
+
+    return self.SubmitJob([op])
 
 
 def _ParseRenameInstanceRequest(name, data):
@@ -1179,7 +1201,7 @@ class R_2_instances_name_rename(baserlib.R_Generic):
 
     op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 def _ParseModifyInstanceRequest(name, data):
@@ -1208,7 +1230,7 @@ class R_2_instances_name_modify(baserlib.R_Generic):
 
     op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_disk_grow(baserlib.R_Generic):
@@ -1226,7 +1248,106 @@ class R_2_instances_name_disk_grow(baserlib.R_Generic):
       "disk": int(self.items[1]),
       })
 
-    return baserlib.SubmitJob([op])
+    return self.SubmitJob([op])
+
+
+class R_2_instances_name_console(baserlib.R_Generic):
+  """/2/instances/[instance_name]/console resource.
+
+  """
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def GET(self):
+    """Request information for connecting to instance's console.
+
+    @return: Serialized instance console description, see
+             L{objects.InstanceConsole}
+
+    """
+    client = self.GetClient()
+
+    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
+
+    if console is None:
+      raise http.HttpServiceUnavailable("Instance console unavailable")
+
+    assert isinstance(console, dict)
+    return console
+
+
+def _GetQueryFields(args):
+  """Returns list of query fields from request arguments.
+
+  """
+  try:
+    fields = args["fields"]
+  except KeyError:
+    raise http.HttpBadRequest("Missing 'fields' query argument")
+
+  return _SplitQueryFields(fields[0])
+
+
+def _SplitQueryFields(fields):
+  """Splits a comma-separated list of fields into stripped names.
+
+  """
+  return [i.strip() for i in fields.split(",")]
+
+
+class R_2_query(baserlib.R_Generic):
+  """/2/query/[resource] resource.
+
+  """
+  # Results might contain sensitive information
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def _Query(self, fields, filter_):
+    return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
+
+  def GET(self):
+    """Returns resource information.
+
+    @return: Query result, see L{objects.QueryResponse}
+
+    """
+    return self._Query(_GetQueryFields(self.queryargs), None)
+
+  def PUT(self):
+    """Submits job querying for resources.
+
+    @return: Query result, see L{objects.QueryResponse}
+
+    """
+    body = self.request_body
+
+    baserlib.CheckType(body, dict, "Body contents")
+
+    try:
+      fields = body["fields"]
+    except KeyError:
+      fields = _GetQueryFields(self.queryargs)
+
+    return self._Query(fields, self.request_body.get("filter", None))
+
+
+class R_2_query_fields(baserlib.R_Generic):
+  """/2/query/[resource]/fields resource.
+
+  """
+  def GET(self):
+    """Retrieves list of available fields for a resource.
+
+    @return: List of serialized L{objects.QueryFieldDefinition}
+
+    """
+    try:
+      raw_fields = self.queryargs["fields"]
+    except KeyError:
+      fields = None
+    else:
+      fields = _SplitQueryFields(raw_fields[0])
+
+    return self.GetClient().QueryFields(self.items[0], fields).ToDict()
 
 
 class _R_Tags(baserlib.R_Generic):
@@ -1257,8 +1378,33 @@ class _R_Tags(baserlib.R_Generic):
     Example: ["tag1", "tag2", "tag3"]
 
     """
-    # pylint: disable-msg=W0212
-    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
+    kind = self.TAG_LEVEL
+
+    if kind in (constants.TAG_INSTANCE,
+                constants.TAG_NODEGROUP,
+                constants.TAG_NODE):
+      if not self.name:
+        raise http.HttpBadRequest("Missing name on tag request")
+
+      cl = self.GetClient()
+      if kind == constants.TAG_INSTANCE:
+        fn = cl.QueryInstances
+      elif kind == constants.TAG_NODEGROUP:
+        fn = cl.QueryGroups
+      else:
+        fn = cl.QueryNodes
+      result = fn(names=[self.name], fields=["tags"], use_locking=False)
+      if not result or not result[0]:
+        raise http.HttpBadGateway("Invalid response from tag query")
+      tags = result[0][0]
+
+    elif kind == constants.TAG_CLUSTER:
+      assert not self.name
+      # TODO: Use query API?
+      ssc = ssconf.SimpleStore()
+      tags = ssc.GetClusterTags()
+
+    return list(tags)
 
   def PUT(self):
     """Add a set of tags.
@@ -1268,12 +1414,12 @@ class _R_Tags(baserlib.R_Generic):
 
     """
     # pylint: disable-msg=W0212
-    if 'tag' not in self.queryargs:
+    if "tag" not in self.queryargs:
       raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                 " the 'tag' parameter")
-    return baserlib._Tags_PUT(self.TAG_LEVEL,
-                              self.queryargs['tag'], name=self.name,
-                              dry_run=bool(self.dryRun()))
+    op = opcodes.OpTagsSet(kind=self.TAG_LEVEL, name=self.name,
+                           tags=self.queryargs["tag"], dry_run=self.dryRun())
+    return self.SubmitJob([op])
 
   def DELETE(self):
     """Delete a tag.
@@ -1284,14 +1430,13 @@ class _R_Tags(baserlib.R_Generic):
 
     """
     # pylint: disable-msg=W0212
-    if 'tag' not in self.queryargs:
+    if "tag" not in self.queryargs:
       # no we not gonna delete all tags
       raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                 " tag(s) using the 'tag' parameter")
-    return baserlib._Tags_DELETE(self.TAG_LEVEL,
-                                 self.queryargs['tag'],
-                                 name=self.name,
-                                 dry_run=bool(self.dryRun()))
+    op = opcodes.OpTagsDel(kind=self.TAG_LEVEL, name=self.name,
+                           tags=self.queryargs["tag"], dry_run=self.dryRun())
+    return self.SubmitJob([op])
 
 
 class R_2_instances_name_tags(_R_Tags):
@@ -1312,6 +1457,15 @@ class R_2_nodes_name_tags(_R_Tags):
   TAG_LEVEL = constants.TAG_NODE
 
 
+class R_2_groups_name_tags(_R_Tags):
+  """ /2/groups/[group_name]/tags resource.
+
+  Manages per-nodegroup tags.
+
+  """
+  TAG_LEVEL = constants.TAG_NODEGROUP
+
+
 class R_2_tags(_R_Tags):
   """ /2/tags resource.