rapi: Re-add “/2” resource
lib/rapi/rlib2.py
index 3a398a5..550af5a 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # 02110-1301, USA.
 
 
-"""Remote API version 2 baserlib.library.
+"""Remote API resource implementations.
+
+PUT or POST?
+============
+
+According to RFC2616 the main difference between PUT and POST is that
+POST can create new resources but PUT can only create the resource the
+URI was pointing to on the PUT request.
+
+In the context of this module POST on ``/2/instances`` to change an existing
+entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
+new instance) with a name specified in the request.
+
+Quoting from RFC2616, section 9.6::
+
+  The fundamental difference between the POST and PUT requests is reflected in
+  the different meaning of the Request-URI. The URI in a POST request
+  identifies the resource that will handle the enclosed entity. That resource
+  might be a data-accepting process, a gateway to some other protocol, or a
+  separate entity that accepts annotations. In contrast, the URI in a PUT
+  request identifies the entity enclosed with the request -- the user agent
+  knows what URI is intended and the server MUST NOT attempt to apply the
+  request to some other resource. If the server desires that the request be
+  applied to a different URI, it MUST send a 301 (Moved Permanently) response;
+  the user agent MAY then make its own decision regarding whether or not to
+  redirect the request.
+
+So when adding new methods, if they are operating on the URI entity itself,
+PUT should be preferred over POST.
 
 """
 
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
 from ganeti import opcodes
 from ganeti import http
 from ganeti import constants
 from ganeti import cli
 from ganeti import rapi
+from ganeti import ht
+from ganeti import compat
+from ganeti import ssconf
 from ganeti.rapi import baserlib
 
 
+_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
 I_FIELDS = ["name", "admin_state", "os",
             "pnode", "snodes",
             "disk_template",
-            "nic.ips", "nic.macs", "nic.modes", "nic.links",
+            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
             "network_port",
             "disk.sizes", "disk_usage",
             "beparams", "hvparams",
-            "oper_state", "oper_ram", "status",
-            "tags"]
+            "oper_state", "oper_ram", "oper_vcpus", "status",
+            "custom_hvparams", "custom_beparams", "custom_nicparams",
+            ] + _COMMON_FIELDS
 
 N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "dtotal", "dfree",
             "mtotal", "mnode", "mfree",
-            "pinst_cnt", "sinst_cnt", "tags",
+            "pinst_cnt", "sinst_cnt",
             "ctotal", "cnodes", "csockets",
-            ]
+            "pip", "sip", "role",
+            "pinst_list", "sinst_list",
+            "master_capable", "vm_capable",
+            "group.uuid",
+            ] + _COMMON_FIELDS
+
+G_FIELDS = [
+  "alloc_policy",
+  "name",
+  "node_cnt",
+  "node_list",
+  ] + _COMMON_FIELDS
+
+J_FIELDS_BULK = [
+  "id", "ops", "status", "summary",
+  "opstatus",
+  "received_ts", "start_ts", "end_ts",
+  ]
+
+J_FIELDS = J_FIELDS_BULK + [
+  "oplog",
+  "opresult",
+  ]
 
 _NR_DRAINED = "drained"
-_NR_MASTER_CANDIATE = "master-candidate"
+_NR_MASTER_CANDIDATE = "master-candidate"
 _NR_MASTER = "master"
 _NR_OFFLINE = "offline"
 _NR_REGULAR = "regular"
 
 _NR_MAP = {
-  "M": _NR_MASTER,
-  "C": _NR_MASTER_CANDIATE,
-  "D": _NR_DRAINED,
-  "O": _NR_OFFLINE,
-  "R": _NR_REGULAR,
+  constants.NR_MASTER: _NR_MASTER,
+  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
+  constants.NR_DRAINED: _NR_DRAINED,
+  constants.NR_OFFLINE: _NR_OFFLINE,
+  constants.NR_REGULAR: _NR_REGULAR,
   }
 
+assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
+
+# Request data version field
+_REQ_DATA_VERSION = "__version__"
+
+# Feature string for instance creation request data version 1
+_INST_CREATE_REQV1 = "instance-create-reqv1"
+
+# Feature string for instance reinstall request version 1
+_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+
+# Feature string for node migration version 1
+_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+
+# Feature string for node evacuation with LU-generated jobs
+_NODE_EVAC_RES1 = "node-evac-res1"
+
+ALL_FEATURES = frozenset([
+  _INST_CREATE_REQV1,
+  _INST_REINSTALL_REQV1,
+  _NODE_MIGRATE_REQV1,
+  _NODE_EVAC_RES1,
+  ])
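Editorial note: a client is expected to probe ``GET /2/features`` before relying on any of these optional behaviours. A minimal, hypothetical client-side check might look like the following (the advertised list is hard-coded here purely for illustration).

```python
# Hypothetical probe; in practice the list would come from GET /2/features
# rather than being hard-coded.
advertised = ["instance-create-reqv1", "node-migrate-reqv1"]

if "instance-create-reqv1" not in advertised:
    raise RuntimeError("server does not support instance creation"
                       " request data version 1")
```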
+
+# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
+_WFJC_TIMEOUT = 10
+
+
+class R_root(baserlib.ResourceBase):
+  """/ resource.
+
+  """
+  @staticmethod
+  def GET():
+    """Supported for legacy reasons.
+
+    """
+    return None
 
-class R_version(baserlib.R_Generic):
+
+class R_2(R_root):
+  """/2 resource.
+
+  """
+
+
+class R_version(baserlib.ResourceBase):
   """/version resource.
 
   This resource should be used to determine the remote API version and
   to adapt clients accordingly.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns the remote API version.
 
     """
     return constants.RAPI_VERSION
 
 
-class R_2_info(baserlib.R_Generic):
-  """Cluster info.
+class R_2_info(baserlib.ResourceBase):
+  """/2/info resource.
 
   """
   def GET(self):
     """Returns cluster information.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
     return client.QueryClusterInfo()
 
 
-class R_2_os(baserlib.R_Generic):
+class R_2_features(baserlib.ResourceBase):
+  """/2/features resource.
+
+  """
+  @staticmethod
+  def GET():
+    """Returns list of optional RAPI features implemented.
+
+    """
+    return list(ALL_FEATURES)
+
+
+class R_2_os(baserlib.ResourceBase):
   """/2/os resource.
 
   """
@@ -101,9 +218,9 @@ class R_2_os(baserlib.R_Generic):
     Example: ["debian-etch"]
 
     """
-    cl = baserlib.GetClient()
-    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
-    job_id = baserlib.SubmitJob([op], cl)
+    cl = self.GetClient()
+    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
+    job_id = self.SubmitJob([op], cl=cl)
     # we use a custom feedback function; instead of printing we log the status
     result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
     diagnose_data = result[0]
@@ -111,10 +228,28 @@ class R_2_os(baserlib.R_Generic):
     if not isinstance(diagnose_data, list):
       raise http.HttpBadGateway(message="Can't get OS list")
 
-    return [row[0] for row in diagnose_data if row[1]]
+    os_names = []
+    for (name, variants) in diagnose_data:
+      os_names.extend(cli.CalculateOSNames(name, variants))
+
+    return os_names
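Editorial note: the expansion done by ``cli.CalculateOSNames`` can be illustrated with a stand-alone sketch. The diagnose data is made up, and the '+' separator between OS name and variant is an assumption, not something defined in this patch.

```python
# Stand-alone approximation of the name expansion; not the real
# cli.CalculateOSNames, just an illustration of the resulting list shape.
def expand_os_names(diagnose_data):
    names = []
    for os_name, variants in diagnose_data:
        if variants:
            names.extend("%s+%s" % (os_name, v) for v in variants)
        else:
            names.append(os_name)
    return names

print(expand_os_names([("debian-etch", []),
                       ("ubuntu", ["lucid", "maverick"])]))
# ['debian-etch', 'ubuntu+lucid', 'ubuntu+maverick']
```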
+
+
+class R_2_redist_config(baserlib.OpcodeResource):
+  """/2/redistribute-config resource.
+
+  """
+  PUT_OPCODE = opcodes.OpClusterRedistConf
+
+
+class R_2_cluster_modify(baserlib.OpcodeResource):
+  """/2/modify resource.
+
+  """
+  PUT_OPCODE = opcodes.OpClusterSetParams
 
 
-class R_2_jobs(baserlib.R_Generic):
+class R_2_jobs(baserlib.ResourceBase):
   """/2/jobs resource.
 
   """
@@ -124,15 +259,18 @@ class R_2_jobs(baserlib.R_Generic):
     @return: a dictionary with jobs id and uri.
 
     """
-    fields = ["id"]
-    cl = baserlib.GetClient()
-    # Convert the list of lists to the list of ids
-    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
-    return baserlib.BuildUriList(result, "/2/jobs/%s",
-                                 uri_fields=("id", "uri"))
+    client = self.GetClient()
+
+    if self.useBulk():
+      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
+      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
+    else:
+      jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
+      return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
+                                   uri_fields=("id", "uri"))
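Editorial note: the two response shapes for ``GET /2/jobs`` (default URI list versus ``?bulk=1``) could look roughly like this; all values are invented and only the shape matters.

```python
# Invented example data.
default_reply = [
    {"id": "42", "uri": "/2/jobs/42"},
    {"id": "43", "uri": "/2/jobs/43"},
]

# With ?bulk=1 every job carries the J_FIELDS_BULK columns directly.
bulk_reply = [
    {"id": "42", "status": "success", "summary": ["INSTANCE_REBOOT(inst1)"],
     "ops": [], "opstatus": [], "received_ts": None, "start_ts": None,
     "end_ts": None},
]
```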
 
 
-class R_2_jobs_id(baserlib.R_Generic):
+class R_2_jobs_id(baserlib.ResourceBase):
   """/2/jobs/[job_id] resource.
 
   """
@@ -149,26 +287,71 @@ class R_2_jobs_id(baserlib.R_Generic):
             - opresult: OpCodes results as a list of lists
 
     """
-    fields = ["id", "ops", "status", "summary",
-              "opstatus", "opresult", "oplog",
-              "received_ts", "start_ts", "end_ts",
-              ]
     job_id = self.items[0]
-    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
+    result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
     if result is None:
       raise http.HttpNotFound()
-    return baserlib.MapFields(fields, result)
+    return baserlib.MapFields(J_FIELDS, result)
 
   def DELETE(self):
     """Cancel not-yet-started job.
 
     """
     job_id = self.items[0]
-    result = baserlib.GetClient().CancelJob(job_id)
+    result = self.GetClient().CancelJob(job_id)
     return result
 
 
-class R_2_nodes(baserlib.R_Generic):
+class R_2_jobs_id_wait(baserlib.ResourceBase):
+  """/2/jobs/[job_id]/wait resource.
+
+  """
+  # WaitForJobChange provides access to sensitive information and blocks
+  # machine resources (it's a blocking RAPI call), hence restricting access.
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def GET(self):
+    """Waits for job changes.
+
+    """
+    job_id = self.items[0]
+
+    fields = self.getBodyParameter("fields")
+    prev_job_info = self.getBodyParameter("previous_job_info", None)
+    prev_log_serial = self.getBodyParameter("previous_log_serial", None)
+
+    if not isinstance(fields, list):
+      raise http.HttpBadRequest("The 'fields' parameter should be a list")
+
+    if not (prev_job_info is None or isinstance(prev_job_info, list)):
+      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
+                                " be a list")
+
+    if not (prev_log_serial is None or
+            isinstance(prev_log_serial, (int, long))):
+      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
+                                " be a number")
+
+    client = self.GetClient()
+    result = client.WaitForJobChangeOnce(job_id, fields,
+                                         prev_job_info, prev_log_serial,
+                                         timeout=_WFJC_TIMEOUT)
+    if not result:
+      raise http.HttpNotFound()
+
+    if result == constants.JOB_NOTCHANGED:
+      # No changes
+      return None
+
+    (job_info, log_entries) = result
+
+    return {
+      "job_info": job_info,
+      "log_entries": log_entries,
+      }
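Editorial note: a hypothetical client-side polling loop built on this resource could look like the sketch below. The ``wait_once`` callable stands in for the actual GET request; the job status values and the log-entry layout are assumptions, not defined in this file.

```python
def follow_job(wait_once):
    """Polls until the job reaches a final status.

    wait_once(body) stands in for GET /2/jobs/[job_id]/wait with the given
    body parameters and returns the decoded response (None on timeout).
    """
    prev_info = None
    prev_serial = None
    while True:
        reply = wait_once({"fields": ["status"],
                           "previous_job_info": prev_info,
                           "previous_log_serial": prev_serial})
        if reply is None:
            continue  # _WFJC_TIMEOUT expired without changes; poll again
        prev_info = reply["job_info"]
        for entry in reply.get("log_entries", []):
            # assumed layout: (serial, timestamp, type, message)
            prev_serial = max(prev_serial or 0, entry[0])
        if prev_info[0] in ("success", "error", "canceled"):
            return prev_info[0]
```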
+
+
+class R_2_nodes(baserlib.ResourceBase):
   """/2/nodes resource.
 
   """
@@ -176,7 +359,7 @@ class R_2_nodes(baserlib.R_Generic):
     """Returns a list of all nodes.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
 
     if self.useBulk():
       bulkdata = client.QueryNodes([], N_FIELDS, False)
@@ -188,8 +371,8 @@ class R_2_nodes(baserlib.R_Generic):
                                    uri_fields=("id", "uri"))
 
 
-class R_2_nodes_name(baserlib.R_Generic):
-  """/2/nodes/[node_name] resources.
+class R_2_nodes_name(baserlib.ResourceBase):
+  """/2/nodes/[node_name] resource.
 
   """
   def GET(self):
@@ -197,17 +380,21 @@ class R_2_nodes_name(baserlib.R_Generic):
 
     """
     node_name = self.items[0]
-    client = baserlib.GetClient()
-    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
-                               use_locking=self.useLocking())
+    client = self.GetClient()
+
+    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
+                                            names=[node_name], fields=N_FIELDS,
+                                            use_locking=self.useLocking())
 
     return baserlib.MapFields(N_FIELDS, result[0])
 
 
-class R_2_nodes_name_role(baserlib.R_Generic):
-  """ /2/nodes/[node_name]/role resource.
+class R_2_nodes_name_role(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/role resource.
 
   """
+  PUT_OPCODE = opcodes.OpNodeSetParams
+
   def GET(self):
     """Returns the current node role.
 
@@ -215,30 +402,26 @@ class R_2_nodes_name_role(baserlib.R_Generic):
 
     """
     node_name = self.items[0]
-    client = baserlib.GetClient()
+    client = self.GetClient()
     result = client.QueryNodes(names=[node_name], fields=["role"],
                                use_locking=self.useLocking())
 
     return _NR_MAP[result[0][0]]
 
-  def PUT(self):
+  def GetPutOpInput(self):
     """Sets the node role.
 
-    @return: a job id
-
     """
-    if not isinstance(self.req.request_body, basestring):
-      raise http.HttpBadRequest("Invalid body contents, not a string")
+    baserlib.CheckType(self.request_body, basestring, "Body contents")
 
-    node_name = self.items[0]
-    role = self.req.request_body
+    role = self.request_body
 
     if role == _NR_REGULAR:
       candidate = False
       offline = False
       drained = False
 
-    elif role == _NR_MASTER_CANDIATE:
+    elif role == _NR_MASTER_CANDIDATE:
       candidate = True
       offline = drained = None
 
@@ -253,89 +436,107 @@ class R_2_nodes_name_role(baserlib.R_Generic):
     else:
       raise http.HttpBadRequest("Can't set '%s' role" % role)
 
-    op = opcodes.OpSetNodeParams(node_name=node_name,
-                                 master_candidate=candidate,
-                                 offline=offline,
-                                 drained=drained,
-                                 force=bool(self.useForce()))
+    assert len(self.items) == 1
 
-    return baserlib.SubmitJob([op])
+    return ({}, {
+      "node_name": self.items[0],
+      "master_candidate": candidate,
+      "offline": offline,
+      "drained": drained,
+      "force": self.useForce(),
+      })
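Editorial note: unlike most write resources, the role resource takes a bare string as its body. An illustrative request, assuming the transport JSON-decodes the body (which is what the ``basestring`` check above implies):

```python
import json

# PUT /2/nodes/node1.example.com/role?force=0 -- body is just the role name.
body = json.dumps("master-candidate")   # one of the _NR_* strings above
```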
 
 
-class R_2_nodes_name_evacuate(baserlib.R_Generic):
+class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
   """/2/nodes/[node_name]/evacuate resource.
 
   """
-  def POST(self):
-    """Evacuate all secondary instances off a node.
+  POST_OPCODE = opcodes.OpNodeEvacuate
 
-    """
-    node_name = self.items[0]
-    remote_node = self._checkStringVariable("remote_node", default=None)
-    iallocator = self._checkStringVariable("iallocator", default=None)
+  def GetPostOpInput(self):
+    """Evacuate all instances off a node.
 
-    op = opcodes.OpEvacuateNode(node_name=node_name,
-                                remote_node=remote_node,
-                                iallocator=iallocator)
-
-    return baserlib.SubmitJob([op])
+    """
+    return (self.request_body, {
+      "node_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
 
 
-class R_2_nodes_name_migrate(baserlib.R_Generic):
+class R_2_nodes_name_migrate(baserlib.OpcodeResource):
   """/2/nodes/[node_name]/migrate resource.
 
   """
-  def POST(self):
+  POST_OPCODE = opcodes.OpNodeMigrate
+
+  def GetPostOpInput(self):
     """Migrate all primary instances from a node.
 
     """
-    node_name = self.items[0]
-    live = bool(self._checkIntVariable("live", default=1))
-
-    op = opcodes.OpMigrateNode(node_name=node_name, live=live)
+    if self.queryargs:
+      # Support old-style requests
+      if "live" in self.queryargs and "mode" in self.queryargs:
+        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
+                                  " be passed")
+
+      if "live" in self.queryargs:
+        if self._checkIntVariable("live", default=1):
+          mode = constants.HT_MIGRATION_LIVE
+        else:
+          mode = constants.HT_MIGRATION_NONLIVE
+      else:
+        mode = self._checkStringVariable("mode", default=None)
+
+      data = {
+        "mode": mode,
+        }
+    else:
+      data = self.request_body
 
-    return baserlib.SubmitJob([op])
+    return (data, {
+      "node_name": self.items[0],
+      })
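Editorial note: the handler accepts both request forms; roughly as follows (values invented, and the ``mode`` string is assumed to match ``constants.HT_MIGRATION_*``).

```python
import json

# Legacy form: everything in query arguments, no body.
legacy = ("POST", "/2/nodes/node1.example.com/migrate?live=0", None)

# New form: a JSON body that is handed to OpNodeMigrate unchanged.
new_style = ("POST", "/2/nodes/node1.example.com/migrate",
             json.dumps({"mode": "non-live"}))
```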
 
 
-class R_2_nodes_name_storage(baserlib.R_Generic):
-  """/2/nodes/[node_name]/storage ressource.
+class R_2_nodes_name_storage(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/storage resource.
 
   """
-  # LUQueryNodeStorage acquires locks, hence restricting access to GET
+  # LUNodeQueryStorage acquires locks, hence restricting access to GET
   GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+  GET_OPCODE = opcodes.OpNodeQueryStorage
 
-  def GET(self):
-    node_name = self.items[0]
+  def GetGetOpInput(self):
+    """List storage available on a node.
 
+    """
     storage_type = self._checkStringVariable("storage_type", None)
-    if not storage_type:
-      raise http.HttpBadRequest("Missing the required 'storage_type'"
-                                " parameter")
-
     output_fields = self._checkStringVariable("output_fields", None)
+
     if not output_fields:
       raise http.HttpBadRequest("Missing the required 'output_fields'"
                                 " parameter")
 
-    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
-                                    storage_type=storage_type,
-                                    output_fields=output_fields.split(","))
-    return baserlib.SubmitJob([op])
+    return ({}, {
+      "nodes": [self.items[0]],
+      "storage_type": storage_type,
+      "output_fields": output_fields.split(","),
+      })
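Editorial note: an illustrative request for this resource; the storage-type and field names are assumptions based on the usual storage fields, not taken from this patch.

```python
# GET with query parameters only; output_fields is mandatory.
list_storage = ("GET",
                "/2/nodes/node1.example.com/storage"
                "?storage_type=lvm-vg&output_fields=name,size,allocatable",
                None)
```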
 
 
-class R_2_nodes_name_storage_modify(baserlib.R_Generic):
-  """/2/nodes/[node_name]/storage/modify ressource.
+class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/storage/modify resource.
 
   """
-  def PUT(self):
-    node_name = self.items[0]
+  PUT_OPCODE = opcodes.OpNodeModifyStorage
 
-    storage_type = self._checkStringVariable("storage_type", None)
-    if not storage_type:
-      raise http.HttpBadRequest("Missing the required 'storage_type'"
-                                " parameter")
+  def GetPutOpInput(self):
+    """Modifies a storage volume on a node.
 
+    """
+    storage_type = self._checkStringVariable("storage_type", None)
     name = self._checkStringVariable("name", None)
+
     if not name:
       raise http.HttpBadRequest("Missing the required 'name'"
                                 " parameter")
@@ -346,22 +547,167 @@ class R_2_nodes_name_storage_modify(baserlib.R_Generic):
       changes[constants.SF_ALLOCATABLE] = \
         bool(self._checkIntVariable("allocatable", default=1))
 
-    op = opcodes.OpModifyNodeStorage(node_name=node_name,
-                                     storage_type=storage_type,
-                                     name=name,
-                                     changes=changes)
-    return baserlib.SubmitJob([op])
+    return ({}, {
+      "node_name": self.items[0],
+      "storage_type": storage_type,
+      "name": name,
+      "changes": changes,
+      })
+
+
+class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/storage/repair resource.
+
+  """
+  PUT_OPCODE = opcodes.OpRepairNodeStorage
+
+  def GetPutOpInput(self):
+    """Repairs a storage volume on a node.
+
+    """
+    storage_type = self._checkStringVariable("storage_type", None)
+    name = self._checkStringVariable("name", None)
+    if not name:
+      raise http.HttpBadRequest("Missing the required 'name'"
+                                " parameter")
+
+    return ({}, {
+      "node_name": self.items[0],
+      "storage_type": storage_type,
+      "name": name,
+      })
+
+
+class R_2_groups(baserlib.OpcodeResource):
+  """/2/groups resource.
+
+  """
+  POST_OPCODE = opcodes.OpGroupAdd
+  POST_RENAME = {
+    "name": "group_name",
+    }
+
+  def GetPostOpInput(self):
+    """Create a node group.
+
+    """
+    assert not self.items
+    return (self.request_body, {
+      "dry_run": self.dryRun(),
+      })
+
+  def GET(self):
+    """Returns a list of all node groups.
+
+    """
+    client = self.GetClient()
+
+    if self.useBulk():
+      bulkdata = client.QueryGroups([], G_FIELDS, False)
+      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
+    else:
+      data = client.QueryGroups([], ["name"], False)
+      groupnames = [row[0] for row in data]
+      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
+                                   uri_fields=("name", "uri"))
+
 
+class R_2_groups_name(baserlib.OpcodeResource):
+  """/2/groups/[group_name] resource.
 
-class R_2_instances(baserlib.R_Generic):
+  """
+  DELETE_OPCODE = opcodes.OpGroupRemove
+
+  def GET(self):
+    """Send information about a node group.
+
+    """
+    group_name = self.items[0]
+    client = self.GetClient()
+
+    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
+                                            names=[group_name], fields=G_FIELDS,
+                                            use_locking=self.useLocking())
+
+    return baserlib.MapFields(G_FIELDS, result[0])
+
+  def GetDeleteOpInput(self):
+    """Delete a node group.
+
+    """
+    assert len(self.items) == 1
+    return ({}, {
+      "group_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_groups_name_modify(baserlib.OpcodeResource):
+  """/2/groups/[group_name]/modify resource.
+
+  """
+  PUT_OPCODE = opcodes.OpGroupSetParams
+
+  def GetPutOpInput(self):
+    """Changes some parameters of node group.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "group_name": self.items[0],
+      })
+
+
+class R_2_groups_name_rename(baserlib.OpcodeResource):
+  """/2/groups/[group_name]/rename resource.
+
+  """
+  PUT_OPCODE = opcodes.OpGroupRename
+
+  def GetPutOpInput(self):
+    """Changes the name of a node group.
+
+    """
+    assert len(self.items) == 1
+    return (self.request_body, {
+      "group_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
+  """/2/groups/[group_name]/assign-nodes resource.
+
+  """
+  PUT_OPCODE = opcodes.OpGroupAssignNodes
+
+  def GetPutOpInput(self):
+    """Assigns nodes to a group.
+
+    """
+    assert len(self.items) == 1
+    return (self.request_body, {
+      "group_name": self.items[0],
+      "dry_run": self.dryRun(),
+      "force": self.useForce(),
+      })
+
+
+class R_2_instances(baserlib.OpcodeResource):
   """/2/instances resource.
 
   """
+  POST_OPCODE = opcodes.OpInstanceCreate
+  POST_RENAME = {
+    "os": "os_type",
+    "name": "instance_name",
+    }
+
   def GET(self):
     """Returns a list of all available instances.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
 
     use_locking = self.useLocking()
     if self.useBulk():
@@ -373,157 +719,173 @@ class R_2_instances(baserlib.R_Generic):
       return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                    uri_fields=("id", "uri"))
 
-  def POST(self):
+  def GetPostOpInput(self):
     """Create an instance.
 
     @return: a job id
 
     """
-    if not isinstance(self.req.request_body, dict):
-      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
-
-    beparams = baserlib.MakeParamsDict(self.req.request_body,
-                                       constants.BES_PARAMETERS)
-    hvparams = baserlib.MakeParamsDict(self.req.request_body,
-                                       constants.HVS_PARAMETERS)
-    fn = self.getBodyParameter
-
-    # disk processing
-    disk_data = fn('disks')
-    if not isinstance(disk_data, list):
-      raise http.HttpBadRequest("The 'disks' parameter should be a list")
-    disks = []
-    for idx, d in enumerate(disk_data):
-      if not isinstance(d, int):
-        raise http.HttpBadRequest("Disk %d specification wrong: should"
-                                  " be an integer")
-      disks.append({"size": d})
-    # nic processing (one nic only)
-    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
-    if fn("ip", None) is not None:
-      nics[0]["ip"] = fn("ip")
-    if fn("mode", None) is not None:
-      nics[0]["mode"] = fn("mode")
-    if fn("link", None) is not None:
-      nics[0]["link"] = fn("link")
-    if fn("bridge", None) is not None:
-       nics[0]["bridge"] = fn("bridge")
-
-    op = opcodes.OpCreateInstance(
-      mode=constants.INSTANCE_CREATE,
-      instance_name=fn('name'),
-      disks=disks,
-      disk_template=fn('disk_template'),
-      os_type=fn('os'),
-      pnode=fn('pnode', None),
-      snode=fn('snode', None),
-      iallocator=fn('iallocator', None),
-      nics=nics,
-      start=fn('start', True),
-      ip_check=fn('ip_check', True),
-      wait_for_sync=True,
-      hypervisor=fn('hypervisor', None),
-      hvparams=hvparams,
-      beparams=beparams,
-      file_storage_dir=fn('file_storage_dir', None),
-      file_driver=fn('file_driver', 'loop'),
-      dry_run=bool(self.dryRun()),
-      )
-
-    return baserlib.SubmitJob([op])
-
-
-class R_2_instances_name(baserlib.R_Generic):
-  """/2/instances/[instance_name] resources.
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    # Default to request data version 0
+    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
+
+    if data_version == 0:
+      raise http.HttpBadRequest("Instance creation request version 0 is no"
+                                " longer supported")
+    elif data_version != 1:
+      raise http.HttpBadRequest("Unsupported request data version %s" %
+                                data_version)
+
+    data = self.request_body.copy()
+    # Remove "__version__"
+    data.pop(_REQ_DATA_VERSION, None)
+
+    return (data, {
+      "dry_run": self.dryRun(),
+      })
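Editorial note: a minimal, hypothetical version-1 creation body. The exact set of accepted fields is defined by ``OpInstanceCreate`` plus the ``POST_RENAME`` mapping above; every value here is a placeholder.

```python
import json

body = {
    "__version__": 1,                  # _REQ_DATA_VERSION, stripped above
    "name": "inst1.example.com",       # renamed to instance_name
    "os": "debian-etch",               # renamed to os_type
    "disk_template": "plain",
    "disks": [{"size": 1024}],
    "nics": [{}],
}
payload = json.dumps(body)             # body for POST /2/instances?dry-run=0
```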
+
+
+class R_2_instances_name(baserlib.OpcodeResource):
+  """/2/instances/[instance_name] resource.
 
   """
+  DELETE_OPCODE = opcodes.OpInstanceRemove
+
   def GET(self):
     """Send information about an instance.
 
     """
-    client = baserlib.GetClient()
+    client = self.GetClient()
     instance_name = self.items[0]
-    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
-                                   use_locking=self.useLocking())
+
+    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
+                                            names=[instance_name],
+                                            fields=I_FIELDS,
+                                            use_locking=self.useLocking())
 
     return baserlib.MapFields(I_FIELDS, result[0])
 
-  def DELETE(self):
+  def GetDeleteOpInput(self):
     """Delete an instance.
 
     """
-    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
-                                  ignore_failures=False,
-                                  dry_run=bool(self.dryRun()))
-    return baserlib.SubmitJob([op])
+    assert len(self.items) == 1
+    return ({}, {
+      "instance_name": self.items[0],
+      "ignore_failures": False,
+      "dry_run": self.dryRun(),
+      })
 
 
-class R_2_instances_name_reboot(baserlib.R_Generic):
+class R_2_instances_name_info(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/info resource.
+
+  """
+  GET_OPCODE = opcodes.OpInstanceQueryData
+
+  def GetGetOpInput(self):
+    """Request detailed instance information.
+
+    """
+    assert len(self.items) == 1
+    return ({}, {
+      "instances": [self.items[0]],
+      "static": bool(self._checkIntVariable("static", default=0)),
+      })
+
+
+class R_2_instances_name_reboot(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/reboot resource.
 
   Implements an instance reboot.
 
   """
-  def POST(self):
+  POST_OPCODE = opcodes.OpInstanceReboot
+
+  def GetPostOpInput(self):
     """Reboot an instance.
 
     The URI takes type=[hard|soft|full] and
     ignore_secondaries=[False|True] parameters.
 
     """
-    instance_name = self.items[0]
-    reboot_type = self.queryargs.get('type',
-                                     [constants.INSTANCE_REBOOT_HARD])[0]
-    ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
-                                                 [False])[0])
-    op = opcodes.OpRebootInstance(instance_name=instance_name,
-                                  reboot_type=reboot_type,
-                                  ignore_secondaries=ignore_secondaries,
-                                  dry_run=bool(self.dryRun()))
-
-    return baserlib.SubmitJob([op])
+    return ({}, {
+      "instance_name": self.items[0],
+      "reboot_type":
+        self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
+      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
+      "dry_run": self.dryRun(),
+      })
 
 
-class R_2_instances_name_startup(baserlib.R_Generic):
+class R_2_instances_name_startup(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/startup resource.
 
   Implements an instance startup.
 
   """
-  def PUT(self):
+  PUT_OPCODE = opcodes.OpInstanceStartup
+
+  def GetPutOpInput(self):
     """Startup an instance.
 
     The URI takes the force=[False|True] parameter to start the instance
     even if secondary disks are failing.
 
     """
-    instance_name = self.items[0]
-    force_startup = bool(self.queryargs.get('force', [False])[0])
-    op = opcodes.OpStartupInstance(instance_name=instance_name,
-                                   force=force_startup,
-                                   dry_run=bool(self.dryRun()))
-
-    return baserlib.SubmitJob([op])
+    return ({}, {
+      "instance_name": self.items[0],
+      "force": self.useForce(),
+      "dry_run": self.dryRun(),
+      "no_remember": bool(self._checkIntVariable("no_remember")),
+      })
 
 
-class R_2_instances_name_shutdown(baserlib.R_Generic):
+class R_2_instances_name_shutdown(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/shutdown resource.
 
   Implements an instance shutdown.
 
   """
-  def PUT(self):
+  PUT_OPCODE = opcodes.OpInstanceShutdown
+
+  def GetPutOpInput(self):
     """Shutdown an instance.
 
     """
-    instance_name = self.items[0]
-    op = opcodes.OpShutdownInstance(instance_name=instance_name,
-                                    dry_run=bool(self.dryRun()))
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      "no_remember": bool(self._checkIntVariable("no_remember")),
+      "dry_run": self.dryRun(),
+      })
 
-    return baserlib.SubmitJob([op])
 
+def _ParseInstanceReinstallRequest(name, data):
+  """Parses a request for reinstalling an instance.
 
-class R_2_instances_name_reinstall(baserlib.R_Generic):
+  """
+  if not isinstance(data, dict):
+    raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+
+  ostype = baserlib.CheckParameter(data, "os", default=None)
+  start = baserlib.CheckParameter(data, "start", exptype=bool,
+                                  default=True)
+  osparams = baserlib.CheckParameter(data, "osparams", default=None)
+
+  ops = [
+    opcodes.OpInstanceShutdown(instance_name=name),
+    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
+                                osparams=osparams),
+    ]
+
+  if start:
+    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
+
+  return ops
+
+
+class R_2_instances_name_reinstall(baserlib.ResourceBase):
   """/2/instances/[instance_name]/reinstall resource.
 
   Implements an instance reinstall.
@@ -537,51 +899,299 @@ class R_2_instances_name_reinstall(baserlib.R_Generic):
     automatically.
 
     """
-    instance_name = self.items[0]
-    ostype = self._checkStringVariable('os')
-    nostartup = self._checkIntVariable('nostartup')
-    ops = [
-      opcodes.OpShutdownInstance(instance_name=instance_name),
-      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
-      ]
-    if not nostartup:
-      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
-                                           force=False))
-    return baserlib.SubmitJob(ops)
-
-
-class R_2_instances_name_replace_disks(baserlib.R_Generic):
+    if self.request_body:
+      if self.queryargs:
+        raise http.HttpBadRequest("Can't combine query and body parameters")
+
+      body = self.request_body
+    elif self.queryargs:
+      # Legacy interface, do not modify/extend
+      body = {
+        "os": self._checkStringVariable("os"),
+        "start": not self._checkIntVariable("nostartup"),
+        }
+    else:
+      body = {}
+
+    ops = _ParseInstanceReinstallRequest(self.items[0], body)
+
+    return self.SubmitJob(ops)
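Editorial note: illustrative request bodies for the reinstall resource (values made up). The modern form posts a JSON object; the legacy form uses query arguments and is translated into the same structure by the handler above.

```python
import json

# Modern form: parameters in a JSON body.
modern = ("POST", "/2/instances/inst1.example.com/reinstall",
          json.dumps({"os": "debian-etch", "start": False,
                      "osparams": {"some_param": "value"}}))

# Legacy form: query arguments only.
legacy = ("POST",
          "/2/instances/inst1.example.com/reinstall?os=debian-etch&nostartup=1",
          None)
```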
+
+
+class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/replace-disks resource.
 
   """
-  def POST(self):
+  POST_OPCODE = opcodes.OpInstanceReplaceDisks
+
+  def GetPostOpInput(self):
     """Replaces disks on an instance.
 
     """
-    instance_name = self.items[0]
-    remote_node = self._checkStringVariable("remote_node", default=None)
-    mode = self._checkStringVariable("mode", default=None)
-    raw_disks = self._checkStringVariable("disks", default=None)
-    iallocator = self._checkStringVariable("iallocator", default=None)
-
-    if raw_disks:
-      try:
-        disks = [int(part) for part in raw_disks.split(",")]
-      except ValueError, err:
-        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+    data = self.request_body.copy()
+    static = {
+      "instance_name": self.items[0],
+      }
+
+    # Parse disks
+    try:
+      raw_disks = data["disks"]
+    except KeyError:
+      pass
     else:
-      disks = []
+      if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
+        # Backwards compatibility for strings of the format "1, 2, 3"
+        try:
+          data["disks"] = [int(part) for part in raw_disks.split(",")]
+        except (TypeError, ValueError), err:
+          raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
+
+    return (data, static)
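Editorial note: the backwards-compatibility rule for ``disks`` can be illustrated with a self-contained sketch; the ``ht.TListOf(ht.TInt)`` check is approximated with ``isinstance``.

```python
def normalize_disks(raw):
    """Accepts either a list of disk indices or the legacy "1, 2, 3" string."""
    if isinstance(raw, list) and all(isinstance(i, int) for i in raw):
        return raw
    return [int(part) for part in raw.split(",")]

print(normalize_disks([0, 2]))      # [0, 2]
print(normalize_disks("1, 2, 3"))   # [1, 2, 3]
```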
+
+
+class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/activate-disks resource.
+
+  """
+  PUT_OPCODE = opcodes.OpInstanceActivateDisks
+
+  def GetPutOpInput(self):
+    """Activate disks for an instance.
+
+    The URI might contain ignore_size to ignore current recorded size.
+
+    """
+    return ({}, {
+      "instance_name": self.items[0],
+      "ignore_size": bool(self._checkIntVariable("ignore_size")),
+      })
+
+
+class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/deactivate-disks resource.
+
+  """
+  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks
+
+  def GetPutOpInput(self):
+    """Deactivate disks for an instance.
+
+    """
+    return ({}, {
+      "instance_name": self.items[0],
+      })
+
+
+class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/prepare-export resource.
+
+  """
+  PUT_OPCODE = opcodes.OpBackupPrepare
+
+  def GetPutOpInput(self):
+    """Prepares an export for an instance.
+
+    """
+    return ({}, {
+      "instance_name": self.items[0],
+      "mode": self._checkStringVariable("mode"),
+      })
+
+
+class R_2_instances_name_export(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/export resource.
+
+  """
+  PUT_OPCODE = opcodes.OpBackupExport
+  PUT_RENAME = {
+    "destination": "target_node",
+    }
+
+  def GetPutOpInput(self):
+    """Exports an instance.
+
+    """
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
+
+
+class R_2_instances_name_migrate(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/migrate resource.
+
+  """
+  PUT_OPCODE = opcodes.OpInstanceMigrate
+
+  def GetPutOpInput(self):
+    """Migrates an instance.
+
+    """
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
+
+
+class R_2_instances_name_failover(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/failover resource.
+
+  """
+  PUT_OPCODE = opcodes.OpInstanceFailover
+
+  def GetPutOpInput(self):
+    """Does a failover of an instance.
+
+    """
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
+
 
-    op = opcodes.OpReplaceDisks(instance_name=instance_name,
-                                remote_node=remote_node,
-                                mode=mode,
-                                disks=disks,
-                                iallocator=iallocator)
+class R_2_instances_name_rename(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/rename resource.
+
+  """
+  PUT_OPCODE = opcodes.OpInstanceRename
+
+  def GetPutOpInput(self):
+    """Changes the name of an instance.
+
+    """
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
+
+
+class R_2_instances_name_modify(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/modify resource.
+
+  """
+  PUT_OPCODE = opcodes.OpInstanceSetParams
+
+  def GetPutOpInput(self):
+    """Changes parameters of an instance.
+
+    """
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
+
+
+class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
+
+  """
+  POST_OPCODE = opcodes.OpInstanceGrowDisk
+
+  def GetPostOpInput(self):
+    """Increases the size of an instance disk.
+
+    """
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      "disk": int(self.items[1]),
+      })
+
+
+class R_2_instances_name_console(baserlib.ResourceBase):
+  """/2/instances/[instance_name]/console resource.
+
+  """
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def GET(self):
+    """Request information for connecting to instance's console.
+
+    @return: Serialized instance console description, see
+             L{objects.InstanceConsole}
+
+    """
+    client = self.GetClient()
+
+    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
+
+    if console is None:
+      raise http.HttpServiceUnavailable("Instance console unavailable")
+
+    assert isinstance(console, dict)
+    return console
+
+
+def _GetQueryFields(args):
+  """
+
+  """
+  try:
+    fields = args["fields"]
+  except KeyError:
+    raise http.HttpBadRequest("Missing 'fields' query argument")
+
+  return _SplitQueryFields(fields[0])
+
+
+def _SplitQueryFields(fields):
+  """
+
+  """
+  return [i.strip() for i in fields.split(",")]
+
+
+class R_2_query(baserlib.ResourceBase):
+  """/2/query/[resource] resource.
+
+  """
+  # Results might contain sensitive information
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def _Query(self, fields, filter_):
+    return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
+
+  def GET(self):
+    """Returns resource information.
+
+    @return: Query result, see L{objects.QueryResponse}
+
+    """
+    return self._Query(_GetQueryFields(self.queryargs), None)
+
+  def PUT(self):
+    """Submits job querying for resources.
+
+    @return: Query result, see L{objects.QueryResponse}
+
+    """
+    body = self.request_body
+
+    baserlib.CheckType(body, dict, "Body contents")
+
+    try:
+      fields = body["fields"]
+    except KeyError:
+      fields = _GetQueryFields(self.queryargs)
+
+    return self._Query(fields, self.request_body.get("filter", None))
 
-    return baserlib.SubmitJob([op])
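Editorial note: an illustrative body for ``PUT /2/query/instance``. The resource name and the filter expression follow the query language used elsewhere in Ganeti and are assumptions here, not defined in this file.

```python
import json

query_body = json.dumps({
    "fields": ["name", "status"],
    "filter": ["=", "name", "inst1.example.com"],
})
```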
 
+class R_2_query_fields(baserlib.ResourceBase):
+  """/2/query/[resource]/fields resource.
 
-class _R_Tags(baserlib.R_Generic):
+  """
+  def GET(self):
+    """Retrieves list of available fields for a resource.
+
+    @return: List of serialized L{objects.QueryFieldDefinition}
+
+    """
+    try:
+      raw_fields = self.queryargs["fields"]
+    except KeyError:
+      fields = None
+    else:
+      fields = _SplitQueryFields(raw_fields[0])
+
+    return self.GetClient().QueryFields(self.items[0], fields).ToDict()
+
+
+class _R_Tags(baserlib.OpcodeResource):
   """ Quasiclass for tagging resources
 
   Manages tags. When inheriting this class you must define the
@@ -589,19 +1199,21 @@ class _R_Tags(baserlib.R_Generic):
 
   """
   TAG_LEVEL = None
+  PUT_OPCODE = opcodes.OpTagsSet
+  DELETE_OPCODE = opcodes.OpTagsDel
 
-  def __init__(self, items, queryargs, req):
+  def __init__(self, items, queryargs, req, **kwargs):
     """A tag resource constructor.
 
     We have to override the default to sort out cluster naming case.
 
     """
-    baserlib.R_Generic.__init__(self, items, queryargs, req)
+    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)
 
-    if self.TAG_LEVEL != constants.TAG_CLUSTER:
-      self.name = items[0]
+    if self.TAG_LEVEL == constants.TAG_CLUSTER:
+      self.name = None
     else:
-      self.name = ""
+      self.name = items[0]
 
   def GET(self):
     """Returns a list of tags.
@@ -609,23 +1221,49 @@ class _R_Tags(baserlib.R_Generic):
     Example: ["tag1", "tag2", "tag3"]
 
     """
-    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
-
-  def PUT(self):
+    kind = self.TAG_LEVEL
+
+    if kind in (constants.TAG_INSTANCE,
+                constants.TAG_NODEGROUP,
+                constants.TAG_NODE):
+      if not self.name:
+        raise http.HttpBadRequest("Missing name on tag request")
+
+      cl = self.GetClient()
+      if kind == constants.TAG_INSTANCE:
+        fn = cl.QueryInstances
+      elif kind == constants.TAG_NODEGROUP:
+        fn = cl.QueryGroups
+      else:
+        fn = cl.QueryNodes
+      result = fn(names=[self.name], fields=["tags"], use_locking=False)
+      if not result or not result[0]:
+        raise http.HttpBadGateway("Invalid response from tag query")
+      tags = result[0][0]
+
+    elif kind == constants.TAG_CLUSTER:
+      assert not self.name
+      # TODO: Use query API?
+      ssc = ssconf.SimpleStore()
+      tags = ssc.GetClusterTags()
+
+    return list(tags)
+
+  def GetPutOpInput(self):
     """Add a set of tags.
 
     The request, as a list of strings, should be PUT to this URI. A job
     id is returned.
 
     """
-    if 'tag' not in self.queryargs:
-      raise http.HttpBadRequest("Please specify tag(s) to add using the"
-                                " the 'tag' parameter")
-    return baserlib._Tags_PUT(self.TAG_LEVEL,
-                              self.queryargs['tag'], name=self.name,
-                              dry_run=bool(self.dryRun()))
-
-  def DELETE(self):
+    return ({}, {
+      "kind": self.TAG_LEVEL,
+      "name": self.name,
+      "tags": self.queryargs.get("tag", []),
+      "dry_run": self.dryRun(),
+      })
+
+  def GetDeleteOpInput(self):
     """Delete a tag.
 
     In order to delete a set of tags, the DELETE
@@ -633,14 +1271,8 @@ class _R_Tags(baserlib.R_Generic):
     /tags?tag=[tag]&tag=[tag]
 
     """
-    if 'tag' not in self.queryargs:
-      # no we not gonna delete all tags
-      raise http.HttpBadRequest("Cannot delete all tags - please specify"
-                                " tag(s) using the 'tag' parameter")
-    return baserlib._Tags_DELETE(self.TAG_LEVEL,
-                                 self.queryargs['tag'],
-                                 name=self.name,
-                                 dry_run=bool(self.dryRun()))
+    # Re-use code
+    return self.GetPutOpInput()
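Editorial note: tags travel exclusively in the ``tag`` query argument for both PUT (add) and DELETE (remove); for example (URIs illustrative):

```python
add_tags = ("PUT", "/2/instances/inst1.example.com/tags?tag=web&tag=prod", None)
del_tags = ("DELETE", "/2/instances/inst1.example.com/tags?tag=prod", None)
```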
 
 
 class R_2_instances_name_tags(_R_Tags):
@@ -661,8 +1293,17 @@ class R_2_nodes_name_tags(_R_Tags):
   TAG_LEVEL = constants.TAG_NODE
 
 
+class R_2_groups_name_tags(_R_Tags):
+  """ /2/groups/[group_name]/tags resource.
+
+  Manages per-nodegroup tags.
+
+  """
+  TAG_LEVEL = constants.TAG_NODEGROUP
+
+
 class R_2_tags(_R_Tags):
-  """ /2/instances/tags resource.
+  """ /2/tags resource.
 
   Manages cluster tags.