Add test for backend._GetBlockDevSymlinkPath
diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py
index c34ad06..bfd5415 100644
--- a/lib/rapi/rlib2.py
+++ b/lib/rapi/rlib2.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -51,11 +51,12 @@ PUT should be prefered over POST.
 
 """
 
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
 
 # C0103: Invalid name, since the R_* names are not conforming
 
 from ganeti import opcodes
+from ganeti import objects
 from ganeti import http
 from ganeti import constants
 from ganeti import cli
@@ -70,7 +71,8 @@ _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
 I_FIELDS = ["name", "admin_state", "os",
             "pnode", "snodes",
             "disk_template",
-            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
+            "nic.ips", "nic.macs", "nic.modes",
+            "nic.links", "nic.networks", "nic.bridges",
             "network_port",
             "disk.sizes", "disk_usage",
             "beparams", "hvparams",
             "network_port",
             "disk.sizes", "disk_usage",
             "beparams", "hvparams",
@@ -86,14 +88,29 @@ N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "pip", "sip", "role",
             "pinst_list", "sinst_list",
             "master_capable", "vm_capable",
             "pip", "sip", "role",
             "pinst_list", "sinst_list",
             "master_capable", "vm_capable",
+            "ndparams",
             "group.uuid",
             ] + _COMMON_FIELDS
 
             "group.uuid",
             ] + _COMMON_FIELDS
 
+NET_FIELDS = ["name", "network", "gateway",
+              "network6", "gateway6",
+              "mac_prefix", "network_type",
+              "free_count", "reserved_count",
+              "map", "group_list", "inst_list",
+              "external_reservations", "tags",
+              ]
+
 G_FIELDS = [
   "alloc_policy",
   "name",
   "node_cnt",
   "node_list",
+  "ipolicy",
+  "custom_ipolicy",
+  "diskparams",
+  "custom_diskparams",
+  "ndparams",
+  "custom_ndparams",
   ] + _COMMON_FIELDS
 
 J_FIELDS_BULK = [
@@ -108,14 +125,14 @@ J_FIELDS = J_FIELDS_BULK + [
   ]
 
 _NR_DRAINED = "drained"
-_NR_MASTER_CANDIATE = "master-candidate"
+_NR_MASTER_CANDIDATE = "master-candidate"
 _NR_MASTER = "master"
 _NR_OFFLINE = "offline"
 _NR_REGULAR = "regular"
 
 _NR_MAP = {
   constants.NR_MASTER: _NR_MASTER,
-  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
+  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
   constants.NR_DRAINED: _NR_DRAINED,
   constants.NR_OFFLINE: _NR_OFFLINE,
   constants.NR_REGULAR: _NR_REGULAR,
@@ -138,7 +155,7 @@ _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
 # Feature string for node evacuation with LU-generated jobs
 _NODE_EVAC_RES1 = "node-evac-res1"
 
-ALL_FEATURES = frozenset([
+ALL_FEATURES = compat.UniqueFrozenset([
   _INST_CREATE_REQV1,
   _INST_REINSTALL_REQV1,
   _NODE_MIGRATE_REQV1,
@@ -149,6 +166,21 @@ ALL_FEATURES = frozenset([
 _WFJC_TIMEOUT = 10
 
 
+# FIXME: For compatibility we update the beparams/memory field. Needs to be
+#        removed in Ganeti 2.7
+def _UpdateBeparams(inst):
+  """Updates the beparams dict of inst to support the memory field.
+
+  @param inst: Inst dict
+  @return: Updated inst dict
+
+  """
+  beparams = inst["beparams"]
+  beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]
+
+  return inst
+
+
 class R_root(baserlib.ResourceBase):
   """/ resource.
 
@@ -161,6 +193,12 @@ class R_root(baserlib.ResourceBase):
     return None
 
 
+class R_2(R_root):
+  """/2 resource.
+
+  """
+
+
 class R_version(baserlib.ResourceBase):
   """/version resource.
 
@@ -176,15 +214,17 @@ class R_version(baserlib.ResourceBase):
     return constants.RAPI_VERSION
 
 
-class R_2_info(baserlib.ResourceBase):
+class R_2_info(baserlib.OpcodeResource):
   """/2/info resource.
 
   """
   """/2/info resource.
 
   """
+  GET_OPCODE = opcodes.OpClusterQuery
+
   def GET(self):
     """Returns cluster information.
 
     """
-    client = self.GetClient()
+    client = self.GetClient(query=True)
     return client.QueryClusterInfo()
 
 
@@ -200,10 +240,12 @@ class R_2_features(baserlib.ResourceBase):
     return list(ALL_FEATURES)
 
 
-class R_2_os(baserlib.ResourceBase):
+class R_2_os(baserlib.OpcodeResource):
   """/2/os resource.
 
   """
   """/2/os resource.
 
   """
+  GET_OPCODE = opcodes.OpOsDiagnose
+
   def GET(self):
     """Return a list of all OSes.
 
@@ -253,7 +295,7 @@ class R_2_jobs(baserlib.ResourceBase):
     @return: a dictionary with jobs id and uri.
 
     """
-    client = self.GetClient()
+    client = self.GetClient(query=True)
 
     if self.useBulk():
       bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
@@ -282,7 +324,7 @@ class R_2_jobs_id(baserlib.ResourceBase):
 
     """
     job_id = self.items[0]
-    result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
+    result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
     if result is None:
       raise http.HttpNotFound()
     return baserlib.MapFields(J_FIELDS, result)
@@ -345,15 +387,17 @@ class R_2_jobs_id_wait(baserlib.ResourceBase):
       }
 
 
-class R_2_nodes(baserlib.ResourceBase):
+class R_2_nodes(baserlib.OpcodeResource):
   """/2/nodes resource.
 
   """
   """/2/nodes resource.
 
   """
+  GET_OPCODE = opcodes.OpNodeQuery
+
   def GET(self):
     """Returns a list of all nodes.
 
     """
-    client = self.GetClient()
+    client = self.GetClient(query=True)
 
     if self.useBulk():
       bulkdata = client.QueryNodes([], N_FIELDS, False)
@@ -365,16 +409,18 @@ class R_2_nodes(baserlib.ResourceBase):
                                    uri_fields=("id", "uri"))
 
 
                                    uri_fields=("id", "uri"))
 
 
-class R_2_nodes_name(baserlib.ResourceBase):
+class R_2_nodes_name(baserlib.OpcodeResource):
   """/2/nodes/[node_name] resource.
 
   """
   """/2/nodes/[node_name] resource.
 
   """
+  GET_OPCODE = opcodes.OpNodeQuery
+
   def GET(self):
     """Send information about a node.
 
     """
     node_name = self.items[0]
-    client = self.GetClient()
+    client = self.GetClient(query=True)
 
     result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                             names=[node_name], fields=N_FIELDS,
@@ -383,10 +429,28 @@ class R_2_nodes_name(baserlib.ResourceBase):
     return baserlib.MapFields(N_FIELDS, result[0])
 
 
-class R_2_nodes_name_role(baserlib.ResourceBase):
-  """ /2/nodes/[node_name]/role resource.
+class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/powercycle resource.
+
+  """
+  POST_OPCODE = opcodes.OpNodePowercycle
+
+  def GetPostOpInput(self):
+    """Tries to powercycle a node.
+
+    """
+    return (self.request_body, {
+      "node_name": self.items[0],
+      "force": self.useForce(),
+      })
+
+
+class R_2_nodes_name_role(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/role resource.
 
   """
+  PUT_OPCODE = opcodes.OpNodeSetParams
+
   def GET(self):
     """Returns the current node role.
 
@@ -394,22 +458,18 @@ class R_2_nodes_name_role(baserlib.ResourceBase):
 
     """
     node_name = self.items[0]
-    client = self.GetClient()
+    client = self.GetClient(query=True)
     result = client.QueryNodes(names=[node_name], fields=["role"],
                                use_locking=self.useLocking())
 
     return _NR_MAP[result[0][0]]
 
-  def PUT(self):
+  def GetPutOpInput(self):
     """Sets the node role.
 
     """Sets the node role.
 
-    @return: a job id
-
     """
     """
-    if not isinstance(self.request_body, basestring):
-      raise http.HttpBadRequest("Invalid body contents, not a string")
+    baserlib.CheckType(self.request_body, basestring, "Body contents")
 
-    node_name = self.items[0]
     role = self.request_body
 
     if role == _NR_REGULAR:
@@ -417,7 +477,7 @@ class R_2_nodes_name_role(baserlib.ResourceBase):
       offline = False
       drained = False
 
-    elif role == _NR_MASTER_CANDIATE:
+    elif role == _NR_MASTER_CANDIDATE:
       candidate = True
       offline = drained = None
 
@@ -432,13 +492,16 @@ class R_2_nodes_name_role(baserlib.ResourceBase):
     else:
       raise http.HttpBadRequest("Can't set '%s' role" % role)
 
-    op = opcodes.OpNodeSetParams(node_name=node_name,
-                                 master_candidate=candidate,
-                                 offline=offline,
-                                 drained=drained,
-                                 force=bool(self.useForce()))
+    assert len(self.items) == 1
 
-    return self.SubmitJob([op])
+    return ({}, {
+      "node_name": self.items[0],
+      "master_candidate": candidate,
+      "offline": offline,
+      "drained": drained,
+      "force": self.useForce(),
+      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
+      })
 
 
 class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
@@ -492,45 +555,62 @@ class R_2_nodes_name_migrate(baserlib.OpcodeResource):
       })
 
 
-class R_2_nodes_name_storage(baserlib.ResourceBase):
+class R_2_nodes_name_modify(baserlib.OpcodeResource):
+  """/2/nodes/[node_name]/modify resource.
+
+  """
+  POST_OPCODE = opcodes.OpNodeSetParams
+
+  def GetPostOpInput(self):
+    """Changes parameters of a node.
+
+    """
+    assert len(self.items) == 1
+
+    return (self.request_body, {
+      "node_name": self.items[0],
+      })
+
+
+class R_2_nodes_name_storage(baserlib.OpcodeResource):
   """/2/nodes/[node_name]/storage resource.
 
   """
   # LUNodeQueryStorage acquires locks, hence restricting access to GET
   GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
   """/2/nodes/[node_name]/storage resource.
 
   """
   # LUNodeQueryStorage acquires locks, hence restricting access to GET
   GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+  GET_OPCODE = opcodes.OpNodeQueryStorage
 
-  def GET(self):
-    node_name = self.items[0]
+  def GetGetOpInput(self):
+    """List storage available on a node.
 
+    """
     storage_type = self._checkStringVariable("storage_type", None)
-    if not storage_type:
-      raise http.HttpBadRequest("Missing the required 'storage_type'"
-                                " parameter")
-
     output_fields = self._checkStringVariable("output_fields", None)
+
     if not output_fields:
       raise http.HttpBadRequest("Missing the required 'output_fields'"
                                 " parameter")
 
-    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
-                                    storage_type=storage_type,
-                                    output_fields=output_fields.split(","))
-    return self.SubmitJob([op])
+    return ({}, {
+      "nodes": [self.items[0]],
+      "storage_type": storage_type,
+      "output_fields": output_fields.split(","),
+      })
 
 
-class R_2_nodes_name_storage_modify(baserlib.ResourceBase):
+class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
   """/2/nodes/[node_name]/storage/modify resource.
 
   """
   """/2/nodes/[node_name]/storage/modify resource.
 
   """
-  def PUT(self):
-    node_name = self.items[0]
+  PUT_OPCODE = opcodes.OpNodeModifyStorage
 
-    storage_type = self._checkStringVariable("storage_type", None)
-    if not storage_type:
-      raise http.HttpBadRequest("Missing the required 'storage_type'"
-                                " parameter")
+  def GetPutOpInput(self):
+    """Modifies a storage volume on a node.
 
+    """
+    storage_type = self._checkStringVariable("storage_type", None)
     name = self._checkStringVariable("name", None)
+
     if not name:
       raise http.HttpBadRequest("Missing the required 'name'"
                                 " parameter")
@@ -541,40 +621,158 @@ class R_2_nodes_name_storage_modify(baserlib.ResourceBase):
       changes[constants.SF_ALLOCATABLE] = \
         bool(self._checkIntVariable("allocatable", default=1))
 
-    op = opcodes.OpNodeModifyStorage(node_name=node_name,
-                                     storage_type=storage_type,
-                                     name=name,
-                                     changes=changes)
-    return self.SubmitJob([op])
+    return ({}, {
+      "node_name": self.items[0],
+      "storage_type": storage_type,
+      "name": name,
+      "changes": changes,
+      })
 
 
-class R_2_nodes_name_storage_repair(baserlib.ResourceBase):
+class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
   """/2/nodes/[node_name]/storage/repair resource.
 
   """
   """/2/nodes/[node_name]/storage/repair resource.
 
   """
-  def PUT(self):
-    node_name = self.items[0]
+  PUT_OPCODE = opcodes.OpRepairNodeStorage
 
-    storage_type = self._checkStringVariable("storage_type", None)
-    if not storage_type:
-      raise http.HttpBadRequest("Missing the required 'storage_type'"
-                                " parameter")
+  def GetPutOpInput(self):
+    """Repairs a storage volume on a node.
 
+    """
+    storage_type = self._checkStringVariable("storage_type", None)
     name = self._checkStringVariable("name", None)
     if not name:
       raise http.HttpBadRequest("Missing the required 'name'"
                                 " parameter")
 
-    op = opcodes.OpRepairNodeStorage(node_name=node_name,
-                                     storage_type=storage_type,
-                                     name=name)
-    return self.SubmitJob([op])
+    return ({}, {
+      "node_name": self.items[0],
+      "storage_type": storage_type,
+      "name": name,
+      })
+
+
+class R_2_networks(baserlib.OpcodeResource):
+  """/2/networks resource.
+
+  """
+  GET_OPCODE = opcodes.OpNetworkQuery
+  POST_OPCODE = opcodes.OpNetworkAdd
+  POST_RENAME = {
+    "name": "network_name",
+    }
+
+  def GetPostOpInput(self):
+    """Create a network.
+
+    """
+    assert not self.items
+    return (self.request_body, {
+      "dry_run": self.dryRun(),
+      })
+
+  def GET(self):
+    """Returns a list of all networks.
+
+    """
+    client = self.GetClient()
+
+    if self.useBulk():
+      bulkdata = client.QueryNetworks([], NET_FIELDS, False)
+      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
+    else:
+      data = client.QueryNetworks([], ["name"], False)
+      networknames = [row[0] for row in data]
+      return baserlib.BuildUriList(networknames, "/2/networks/%s",
+                                   uri_fields=("name", "uri"))
+
+
+class R_2_networks_name(baserlib.OpcodeResource):
+  """/2/networks/[network_name] resource.
+
+  """
+  DELETE_OPCODE = opcodes.OpNetworkRemove
+
+  def GET(self):
+    """Send information about a network.
+
+    """
+    network_name = self.items[0]
+    client = self.GetClient()
+
+    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
+                                            names=[network_name],
+                                            fields=NET_FIELDS,
+                                            use_locking=self.useLocking())
+
+    return baserlib.MapFields(NET_FIELDS, result[0])
+
+  def GetDeleteOpInput(self):
+    """Delete a network.
+
+    """
+    assert len(self.items) == 1
+    return (self.request_body, {
+      "network_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_networks_name_connect(baserlib.OpcodeResource):
+  """/2/networks/[network_name]/connect resource.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkConnect
+
+  def GetPutOpInput(self):
+    """Changes some parameters of node group.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_networks_name_disconnect(baserlib.OpcodeResource):
+  """/2/networks/[network_name]/disconnect resource.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkDisconnect
+
+  def GetPutOpInput(self):
+    """Changes some parameters of node group.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_networks_name_modify(baserlib.OpcodeResource):
+  """/2/networks/[network_name]/modify resource.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkSetParams
+
+  def GetPutOpInput(self):
+    """Changes some parameters of network.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      })
 
 
 class R_2_groups(baserlib.OpcodeResource):
   """/2/groups resource.
 
   """
+  GET_OPCODE = opcodes.OpGroupQuery
   POST_OPCODE = opcodes.OpGroupAdd
   POST_RENAME = {
     "name": "group_name",
@@ -583,6 +781,7 @@ class R_2_groups(baserlib.OpcodeResource):
   def GetPostOpInput(self):
     """Create a node group.
 
+
     """
     assert not self.items
     return (self.request_body, {
     """
     assert not self.items
     return (self.request_body, {
@@ -593,7 +792,7 @@ class R_2_groups(baserlib.OpcodeResource):
     """Returns a list of all node groups.
 
     """
     """Returns a list of all node groups.
 
     """
-    client = self.GetClient()
+    client = self.GetClient(query=True)
 
     if self.useBulk():
       bulkdata = client.QueryGroups([], G_FIELDS, False)
@@ -605,16 +804,18 @@ class R_2_groups(baserlib.OpcodeResource):
                                    uri_fields=("name", "uri"))
 
 
                                    uri_fields=("name", "uri"))
 
 
-class R_2_groups_name(baserlib.ResourceBase):
+class R_2_groups_name(baserlib.OpcodeResource):
   """/2/groups/[group_name] resource.
 
   """
   """/2/groups/[group_name] resource.
 
   """
+  DELETE_OPCODE = opcodes.OpGroupRemove
+
   def GET(self):
     """Send information about a node group.
 
     """
     group_name = self.items[0]
-    client = self.GetClient()
+    client = self.GetClient(query=True)
 
     result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                             names=[group_name], fields=G_FIELDS,
@@ -622,14 +823,15 @@ class R_2_groups_name(baserlib.ResourceBase):
 
     return baserlib.MapFields(G_FIELDS, result[0])
 
-  def DELETE(self):
+  def GetDeleteOpInput(self):
     """Delete a node group.
 
     """
     """Delete a node group.
 
     """
-    op = opcodes.OpGroupRemove(group_name=self.items[0],
-                               dry_run=bool(self.dryRun()))
-
-    return self.SubmitJob([op])
+    assert len(self.items) == 1
+    return ({}, {
+      "group_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
 
 
 class R_2_groups_name_modify(baserlib.OpcodeResource):
@@ -683,30 +885,17 @@ class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
       })
 
 
-def _ParseInstanceCreateRequestVersion1(data, dry_run):
-  """Parses an instance creation request version 1.
-
-  @rtype: L{opcodes.OpInstanceCreate}
-  @return: Instance creation opcode
+class R_2_instances(baserlib.OpcodeResource):
+  """/2/instances resource.
 
   """
-  override = {
-    "dry_run": dry_run,
-    }
-
-  rename = {
+  GET_OPCODE = opcodes.OpInstanceQuery
+  POST_OPCODE = opcodes.OpInstanceCreate
+  POST_RENAME = {
     "os": "os_type",
     "name": "instance_name",
     }
 
     "os": "os_type",
     "name": "instance_name",
     }
 
-  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
-                             rename=rename)
-
-
-class R_2_instances(baserlib.ResourceBase):
-  """/2/instances resource.
-
-  """
   def GET(self):
     """Returns a list of all available instances.
 
@@ -716,21 +905,20 @@ class R_2_instances(baserlib.ResourceBase):
     use_locking = self.useLocking()
     if self.useBulk():
       bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
-      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
+      return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
     else:
       instancesdata = client.QueryInstances([], ["name"], use_locking)
       instanceslist = [row[0] for row in instancesdata]
       return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                    uri_fields=("id", "uri"))
 
-  def POST(self):
+  def GetPostOpInput(self):
     """Create an instance.
 
     @return: a job id
 
     """
     """Create an instance.
 
     @return: a job id
 
     """
-    if not isinstance(self.request_body, dict):
-      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+    baserlib.CheckType(self.request_body, dict, "Body contents")
 
     # Default to request data version 0
     data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
@@ -738,22 +926,54 @@ class R_2_instances(baserlib.ResourceBase):
     if data_version == 0:
       raise http.HttpBadRequest("Instance creation request version 0 is no"
                                 " longer supported")
-    elif data_version == 1:
-      data = self.request_body.copy()
-      # Remove "__version__"
-      data.pop(_REQ_DATA_VERSION, None)
-      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
-    else:
+    elif data_version != 1:
       raise http.HttpBadRequest("Unsupported request data version %s" %
                                 data_version)
 
-    return self.SubmitJob([op])
+    data = self.request_body.copy()
+    # Remove "__version__"
+    data.pop(_REQ_DATA_VERSION, None)
+
+    return (data, {
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_instances_multi_alloc(baserlib.OpcodeResource):
+  """/2/instances-multi-alloc resource.
+
+  """
+  POST_OPCODE = opcodes.OpInstanceMultiAlloc
+
+  def GetPostOpInput(self):
+    """Try to allocate multiple instances.
+
+    @return: A dict with submitted jobs, allocatable instances and failed
+             allocations
+
+    """
+    if "instances" not in self.request_body:
+      raise http.HttpBadRequest("Request is missing required 'instances' field"
+                                " in body")
+
+    op_id = {
+      "OP_ID": self.POST_OPCODE.OP_ID, # pylint: disable=E1101
+      }
+    body = objects.FillDict(self.request_body, {
+      "instances": [objects.FillDict(inst, op_id)
+                    for inst in self.request_body["instances"]],
+      })
+
+    return (body, {
+      "dry_run": self.dryRun(),
+      })
 
 
 class R_2_instances_name(baserlib.OpcodeResource):
   """/2/instances/[instance_name] resource.
 
   """
+  GET_OPCODE = opcodes.OpInstanceQuery
   DELETE_OPCODE = opcodes.OpInstanceRemove
 
   def GET(self):
@@ -768,7 +988,7 @@ class R_2_instances_name(baserlib.OpcodeResource):
                                             fields=I_FIELDS,
                                             use_locking=self.useLocking())
 
-    return baserlib.MapFields(I_FIELDS, result[0])
+    return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
 
   def GetDeleteOpInput(self):
     """Delete an instance.
@@ -889,12 +1109,14 @@ def _ParseInstanceReinstallRequest(name, data):
   return ops
 
 
-class R_2_instances_name_reinstall(baserlib.ResourceBase):
+class R_2_instances_name_reinstall(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/reinstall resource.
 
   Implements an instance reinstall.
 
   """
   """/2/instances/[instance_name]/reinstall resource.
 
   Implements an instance reinstall.
 
   """
+  POST_OPCODE = opcodes.OpInstanceReinstall
+
   def POST(self):
     """Reinstall an instance.
 
@@ -932,23 +1154,38 @@ class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
     """Replaces disks on an instance.
 
     """
     """Replaces disks on an instance.
 
     """
-    data = self.request_body.copy()
     static = {
       "instance_name": self.items[0],
       }
 
+    if self.request_body:
+      data = self.request_body
+    elif self.queryargs:
+      # Legacy interface, do not modify/extend
+      data = {
+        "remote_node": self._checkStringVariable("remote_node", default=None),
+        "mode": self._checkStringVariable("mode", default=None),
+        "disks": self._checkStringVariable("disks", default=None),
+        "iallocator": self._checkStringVariable("iallocator", default=None),
+        }
+    else:
+      data = {}
+
     # Parse disks
     try:
-      raw_disks = data["disks"]
+      raw_disks = data.pop("disks")
     except KeyError:
       pass
     else:
-      if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
-        # Backwards compatibility for strings of the format "1, 2, 3"
-        try:
-          data["disks"] = [int(part) for part in raw_disks.split(",")]
-        except (TypeError, ValueError), err:
-          raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
+      if raw_disks:
+        if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
+          data["disks"] = raw_disks
+        else:
+          # Backwards compatibility for strings of the format "1, 2, 3"
+          try:
+            data["disks"] = [int(part) for part in raw_disks.split(",")]
+          except (TypeError, ValueError), err:
+            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
 
     return (data, static)
 
@@ -971,205 +1208,152 @@ class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
       })
 
 
-class R_2_instances_name_deactivate_disks(baserlib.ResourceBase):
+class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/deactivate-disks resource.
 
   """
   """/2/instances/[instance_name]/deactivate-disks resource.
 
   """
-  def PUT(self):
+  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks
+
+  def GetPutOpInput(self):
     """Deactivate disks for an instance.
 
     """
     """Deactivate disks for an instance.
 
     """
-    instance_name = self.items[0]
-
-    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
-
-    return self.SubmitJob([op])
+    return ({}, {
+      "instance_name": self.items[0],
+      })
 
 
-class R_2_instances_name_prepare_export(baserlib.ResourceBase):
-  """/2/instances/[instance_name]/prepare-export resource.
+class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/recreate-disks resource.
 
   """
-  def PUT(self):
-    """Prepares an export for an instance.
+  POST_OPCODE = opcodes.OpInstanceRecreateDisks
 
-    @return: a job id
+  def GetPostOpInput(self):
+    """Recreate disks for an instance.
 
     """
-    instance_name = self.items[0]
-    mode = self._checkStringVariable("mode")
-
-    op = opcodes.OpBackupPrepare(instance_name=instance_name,
-                                 mode=mode)
-
-    return self.SubmitJob([op])
-
+    return ({}, {
+      "instance_name": self.items[0],
+      })
 
-def _ParseExportInstanceRequest(name, data):
-  """Parses a request for an instance export.
 
-  @rtype: L{opcodes.OpBackupExport}
-  @return: Instance export opcode
+class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
+  """/2/instances/[instance_name]/prepare-export resource.
 
   """
-  # Rename "destination" to "target_node"
-  try:
-    data["target_node"] = data.pop("destination")
-  except KeyError:
-    pass
+  PUT_OPCODE = opcodes.OpBackupPrepare
 
-  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
-    "instance_name": name,
-    })
+  def GetPutOpInput(self):
+    """Prepares an export for an instance.
+
+    """
+    return ({}, {
+      "instance_name": self.items[0],
+      "mode": self._checkStringVariable("mode"),
+      })
 
 
-class R_2_instances_name_export(baserlib.ResourceBase):
+class R_2_instances_name_export(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/export resource.
 
   """
   """/2/instances/[instance_name]/export resource.
 
   """
-  def PUT(self):
-    """Exports an instance.
+  PUT_OPCODE = opcodes.OpBackupExport
+  PUT_RENAME = {
+    "destination": "target_node",
+    }
 
-    @return: a job id
+  def GetPutOpInput(self):
+    """Exports an instance.
 
     """
-    if not isinstance(self.request_body, dict):
-      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
-
-    op = _ParseExportInstanceRequest(self.items[0], self.request_body)
-
-    return self.SubmitJob([op])
-
-
-def _ParseMigrateInstanceRequest(name, data):
-  """Parses a request for an instance migration.
-
-  @rtype: L{opcodes.OpInstanceMigrate}
-  @return: Instance migration opcode
-
-  """
-  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
-    "instance_name": name,
-    })
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
 
 
-class R_2_instances_name_migrate(baserlib.ResourceBase):
+class R_2_instances_name_migrate(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/migrate resource.
 
   """
   """/2/instances/[instance_name]/migrate resource.
 
   """
-  def PUT(self):
-    """Migrates an instance.
+  PUT_OPCODE = opcodes.OpInstanceMigrate
 
-    @return: a job id
+  def GetPutOpInput(self):
+    """Migrates an instance.
 
     """
-    baserlib.CheckType(self.request_body, dict, "Body contents")
-
-    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
-
-    return self.SubmitJob([op])
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
 
 
-class R_2_instances_name_failover(baserlib.ResourceBase):
+class R_2_instances_name_failover(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/failover resource.
 
   """
   """/2/instances/[instance_name]/failover resource.
 
   """
-  def PUT(self):
-    """Does a failover of an instance.
+  PUT_OPCODE = opcodes.OpInstanceFailover
 
-    @return: a job id
+  def GetPutOpInput(self):
+    """Does a failover of an instance.
 
     """
-    baserlib.CheckType(self.request_body, dict, "Body contents")
-
-    op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
+    return (self.request_body, {
       "instance_name": self.items[0],
       })
 
       "instance_name": self.items[0],
       })
 
-    return self.SubmitJob([op])
-
-
-def _ParseRenameInstanceRequest(name, data):
-  """Parses a request for renaming an instance.
-
-  @rtype: L{opcodes.OpInstanceRename}
-  @return: Instance rename opcode
-
-  """
-  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
-    "instance_name": name,
-    })
-
 
-class R_2_instances_name_rename(baserlib.ResourceBase):
+class R_2_instances_name_rename(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/rename resource.
 
   """
   """/2/instances/[instance_name]/rename resource.
 
   """
-  def PUT(self):
-    """Changes the name of an instance.
+  PUT_OPCODE = opcodes.OpInstanceRename
 
-    @return: a job id
+  def GetPutOpInput(self):
+    """Changes the name of an instance.
 
     """
-    baserlib.CheckType(self.request_body, dict, "Body contents")
-
-    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
-
-    return self.SubmitJob([op])
-
-
-def _ParseModifyInstanceRequest(name, data):
-  """Parses a request for modifying an instance.
-
-  @rtype: L{opcodes.OpInstanceSetParams}
-  @return: Instance modify opcode
-
-  """
-  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
-    "instance_name": name,
-    })
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
 
 
-class R_2_instances_name_modify(baserlib.ResourceBase):
+class R_2_instances_name_modify(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/modify resource.
 
   """
   """/2/instances/[instance_name]/modify resource.
 
   """
-  def PUT(self):
-    """Changes some parameters of an instance.
+  PUT_OPCODE = opcodes.OpInstanceSetParams
 
-    @return: a job id
+  def GetPutOpInput(self):
+    """Changes parameters of an instance.
 
     """
-    baserlib.CheckType(self.request_body, dict, "Body contents")
-
-    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
-
-    return self.SubmitJob([op])
+    return (self.request_body, {
+      "instance_name": self.items[0],
+      })
 
 
-class R_2_instances_name_disk_grow(baserlib.ResourceBase):
+class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
   """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
 
   """
   """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
 
   """
-  def POST(self):
-    """Increases the size of an instance disk.
+  POST_OPCODE = opcodes.OpInstanceGrowDisk
 
-    @return: a job id
+  def GetPostOpInput(self):
+    """Increases the size of an instance disk.
 
     """
-    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
+    return (self.request_body, {
       "instance_name": self.items[0],
       "disk": int(self.items[1]),
       })
 
       "instance_name": self.items[0],
       "disk": int(self.items[1]),
       })
 
-    return self.SubmitJob([op])
-
 
 class R_2_instances_name_console(baserlib.ResourceBase):
   """/2/instances/[instance_name]/console resource.
 
   """
-  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
+  GET_OPCODE = opcodes.OpInstanceConsole
 
   def GET(self):
     """Request information for connecting to instance's console.
@@ -1190,7 +1374,11 @@ class R_2_instances_name_console(baserlib.ResourceBase):
 
 
 def _GetQueryFields(args):
-  """
+  """Tries to extract C{fields} query parameter.
+
+  @type args: dictionary
+  @rtype: list of string
+  @raise http.HttpBadRequest: When parameter can't be found
 
   """
   try:
@@ -1202,7 +1390,10 @@ def _GetQueryFields(args):
 
 
 def _SplitQueryFields(fields):
-  """
+  """Splits fields as given for a query request.
+
+  @type fields: string
+  @rtype: list of string
 
   """
   return [i.strip() for i in fields.split(",")]
@@ -1213,10 +1404,13 @@ class R_2_query(baserlib.ResourceBase):
 
   """
   # Results might contain sensitive information
-  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
+  PUT_ACCESS = GET_ACCESS
+  GET_OPCODE = opcodes.OpQuery
+  PUT_OPCODE = opcodes.OpQuery
 
-  def _Query(self, fields, filter_):
-    return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
+  def _Query(self, fields, qfilter):
+    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()
 
   def GET(self):
     """Returns resource information.
@@ -1241,13 +1435,20 @@ class R_2_query(baserlib.ResourceBase):
     except KeyError:
       fields = _GetQueryFields(self.queryargs)
 
-    return self._Query(fields, self.request_body.get("filter", None))
+    qfilter = body.get("qfilter", None)
+    # TODO: remove this after 2.7
+    if qfilter is None:
+      qfilter = body.get("filter", None)
+
+    return self._Query(fields, qfilter)
 
 
 class R_2_query_fields(baserlib.ResourceBase):
   """/2/query/[resource]/fields resource.
 
   """
+  GET_OPCODE = opcodes.OpQueryFields
+
   def GET(self):
     """Retrieves list of available fields for a resource.
 
@@ -1264,22 +1465,25 @@ class R_2_query_fields(baserlib.ResourceBase):
     return self.GetClient().QueryFields(self.items[0], fields).ToDict()
 
 
-class _R_Tags(baserlib.ResourceBase):
-  """ Quasiclass for tagging resources
+class _R_Tags(baserlib.OpcodeResource):
+  """Quasiclass for tagging resources.
 
   Manages tags. When inheriting this class you must define the
   TAG_LEVEL for it.
 
   """
   TAG_LEVEL = None
+  GET_OPCODE = opcodes.OpTagsGet
+  PUT_OPCODE = opcodes.OpTagsSet
+  DELETE_OPCODE = opcodes.OpTagsDel
 
-  def __init__(self, items, queryargs, req):
+  def __init__(self, items, queryargs, req, **kwargs):
     """A tag resource constructor.
 
     We have to override the default to sort out cluster naming case.
 
     """
     """A tag resource constructor.
 
     We have to override the default to sort out cluster naming case.
 
     """
-    baserlib.ResourceBase.__init__(self, items, queryargs, req)
+    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)
 
     if self.TAG_LEVEL == constants.TAG_CLUSTER:
       self.name = None
@@ -1300,17 +1504,8 @@ class _R_Tags(baserlib.ResourceBase):
       if not self.name:
         raise http.HttpBadRequest("Missing name on tag request")
 
-      cl = self.GetClient()
-      if kind == constants.TAG_INSTANCE:
-        fn = cl.QueryInstances
-      elif kind == constants.TAG_NODEGROUP:
-        fn = cl.QueryGroups
-      else:
-        fn = cl.QueryNodes
-      result = fn(names=[self.name], fields=["tags"], use_locking=False)
-      if not result or not result[0]:
-        raise http.HttpBadGateway("Invalid response from tag query")
-      tags = result[0][0]
+      cl = self.GetClient(query=True)
+      tags = list(cl.QueryTags(kind, self.name))
 
     elif kind == constants.TAG_CLUSTER:
       assert not self.name
@@ -1320,22 +1515,21 @@ class _R_Tags(baserlib.ResourceBase):
 
     return list(tags)
 
-  def PUT(self):
+  def GetPutOpInput(self):
     """Add a set of tags.
 
     The request as a list of strings should be PUT to this URI. And
     you'll have back a job id.
 
     """
     """Add a set of tags.
 
     The request as a list of strings should be PUT to this URI. And
     you'll have back a job id.
 
     """
-    # pylint: disable-msg=W0212
-    if "tag" not in self.queryargs:
-      raise http.HttpBadRequest("Please specify tag(s) to add using the"
-                                " the 'tag' parameter")
-    op = opcodes.OpTagsSet(kind=self.TAG_LEVEL, name=self.name,
-                           tags=self.queryargs["tag"], dry_run=self.dryRun())
-    return self.SubmitJob([op])
+    return ({}, {
+      "kind": self.TAG_LEVEL,
+      "name": self.name,
+      "tags": self.queryargs.get("tag", []),
+      "dry_run": self.dryRun(),
+      })
 
-  def DELETE(self):
+  def GetDeleteOpInput(self):
     """Delete a tag.
 
     In order to delete a set of tags, the DELETE
     """Delete a tag.
 
     In order to delete a set of tags, the DELETE
@@ -1343,14 +1537,8 @@ class _R_Tags(baserlib.ResourceBase):
     /tags?tag=[tag]&tag=[tag]
 
     """
-    # pylint: disable-msg=W0212
-    if "tag" not in self.queryargs:
-      # no we not gonna delete all tags
-      raise http.HttpBadRequest("Cannot delete all tags - please specify"
-                                " tag(s) using the 'tag' parameter")
-    op = opcodes.OpTagsDel(kind=self.TAG_LEVEL, name=self.name,
-                           tags=self.queryargs["tag"], dry_run=self.dryRun())
-    return self.SubmitJob([op])
+    # Re-use code
+    return self.GetPutOpInput()
 
 
 class R_2_instances_name_tags(_R_Tags):
@@ -1380,6 +1568,15 @@ class R_2_groups_name_tags(_R_Tags):
   TAG_LEVEL = constants.TAG_NODEGROUP
 
 
+class R_2_networks_name_tags(_R_Tags):
+  """ /2/networks/[network_name]/tags resource.
+
+  Manages per-network tags.
+
+  """
+  TAG_LEVEL = constants.TAG_NETWORK
+
+
 class R_2_tags(_R_Tags):
   """ /2/tags resource.
 