RAPI: Allow waiting for job changes
diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py
index 1566e1d..2dffc52 100644
 
 """Remote API version 2 baserlib.library.
 
+  PUT or POST?
+  ============
+
+  According to RFC2616, the main difference between PUT and POST is that
+  POST can create new resources, whereas PUT may only create (or replace)
+  the resource named by the request URI.
+
+  In the context of this module, creating an instance via POST on
+  /2/instances is legitimate, while PUT would not be: the request creates
+  a new entity instead of replacing /2/instances itself.
+
+  So when adding new methods: if they operate on the URI entity itself,
+  PUT should be preferred over POST.
+
 """
 
-import ganeti.opcodes
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
+from ganeti import opcodes
 from ganeti import http
-from ganeti import luxi
 from ganeti import constants
+from ganeti import cli
+from ganeti import rapi
 from ganeti.rapi import baserlib
 
 
+_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
 I_FIELDS = ["name", "admin_state", "os",
             "pnode", "snodes",
             "disk_template",
-            "nic.ips", "nic.macs", "nic.bridges",
+            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
+            "network_port",
             "disk.sizes", "disk_usage",
             "beparams", "hvparams",
             "oper_state", "oper_ram", "status",
-            "tags"]
+            ] + _COMMON_FIELDS
 
 N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "dtotal", "dfree",
             "mtotal", "mnode", "mfree",
-            "pinst_cnt", "sinst_cnt", "tags",
+            "pinst_cnt", "sinst_cnt",
             "ctotal", "cnodes", "csockets",
-            ]
+            "pip", "sip", "role",
+            "pinst_list", "sinst_list",
+            ] + _COMMON_FIELDS
+
+_NR_DRAINED = "drained"
+_NR_MASTER_CANDIATE = "master-candidate"
+_NR_MASTER = "master"
+_NR_OFFLINE = "offline"
+_NR_REGULAR = "regular"
+
+_NR_MAP = {
+  "M": _NR_MASTER,
+  "C": _NR_MASTER_CANDIATE,
+  "D": _NR_DRAINED,
+  "O": _NR_OFFLINE,
+  "R": _NR_REGULAR,
+  }
+
+# Timeout for /2/jobs/[job_id]/wait. Gives the job up to 10 seconds to change.
+_WFJC_TIMEOUT = 10
 
 
 class R_version(baserlib.R_Generic):
@@ -54,9 +94,8 @@ class R_version(baserlib.R_Generic):
   to adapt clients accordingly.
 
   """
-  DOC_URI = "/version"
-
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns the remote API version.
 
     """
@@ -67,30 +106,12 @@ class R_2_info(baserlib.R_Generic):
   """Cluster info.
 
   """
-  DOC_URI = "/2/info"
-
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns cluster information.
 
-    Example::
-
-      {
-        "config_version": 3,
-        "name": "cluster1.example.com",
-        "software_version": "1.2.4",
-        "os_api_version": 5,
-        "export_version": 0,
-        "master": "node1.example.com",
-        "architecture": [
-          "64bit",
-          "x86_64"
-        ],
-        "hypervisor_type": "xen-pvm",
-        "protocol_version": 12
-      }
-
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
     return client.QueryClusterInfo()
 
 
@@ -98,9 +119,8 @@ class R_2_os(baserlib.R_Generic):
   """/2/os resource.
 
   """
-  DOC_URI = "/2/os"
-
-  def GET(self):
+  @staticmethod
+  def GET():
     """Return a list of all OSes.
 
     Can return error 500 in case of a problem.
@@ -108,31 +128,52 @@ class R_2_os(baserlib.R_Generic):
     Example: ["debian-etch"]
 
     """
-    op = ganeti.opcodes.OpDiagnoseOS(output_fields=["name", "valid"],
-                                     names=[])
-    diagnose_data = ganeti.cli.SubmitOpCode(op)
+    cl = baserlib.GetClient()
+    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
+                              names=[])
+    job_id = baserlib.SubmitJob([op], cl)
+    # use a custom feedback function: instead of printing, we log the status
+    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+    diagnose_data = result[0]
 
     if not isinstance(diagnose_data, list):
-      raise http.HttpInternalServerError(message="Can't get OS list")
+      raise http.HttpBadGateway(message="Can't get OS list")
+
+    os_names = []
+    for (name, valid, variants) in diagnose_data:
+      if valid:
+        os_names.extend(cli.CalculateOSNames(name, variants))
 
-    return [row[0] for row in diagnose_data if row[1]]
+    return os_names
+
+
+class R_2_redist_config(baserlib.R_Generic):
+  """/2/redistribute-config resource.
+
+  """
+  @staticmethod
+  def PUT():
+    """Redistribute configuration to all nodes.
+
+    """
+    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
 
 
 class R_2_jobs(baserlib.R_Generic):
   """/2/jobs resource.
 
   """
-  DOC_URI = "/2/jobs"
-
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns a dictionary of jobs.
 
     @return: a dictionary with jobs id and uri.
 
     """
     fields = ["id"]
+    cl = baserlib.GetClient()
     # Convert the list of lists to the list of ids
-    result = [job_id for [job_id] in luxi.Client().QueryJobs(None, fields)]
+    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
     return baserlib.BuildUriList(result, "/2/jobs/%s",
                                  uri_fields=("id", "uri"))
 
@@ -141,8 +182,6 @@ class R_2_jobs_id(baserlib.R_Generic):
   """/2/jobs/[job_id] resource.
 
   """
-  DOC_URI = "/2/jobs/[job_id]"
-
   def GET(self):
     """Returns a job status.
 
@@ -161,7 +200,7 @@ class R_2_jobs_id(baserlib.R_Generic):
               "received_ts", "start_ts", "end_ts",
               ]
     job_id = self.items[0]
-    result = luxi.Client().QueryJobs([job_id, ], fields)[0]
+    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
     if result is None:
       raise http.HttpNotFound()
     return baserlib.MapFields(fields, result)
@@ -171,58 +210,68 @@ class R_2_jobs_id(baserlib.R_Generic):
 
     """
     job_id = self.items[0]
-    result = luxi.Client().CancelJob(job_id)
+    result = baserlib.GetClient().CancelJob(job_id)
     return result
 
 
-class R_2_nodes(baserlib.R_Generic):
-  """/2/nodes resource.
+class R_2_jobs_id_wait(baserlib.R_Generic):
+  """/2/jobs/[job_id]/wait resource.
 
   """
-  DOC_URI = "/2/nodes"
+  # WaitForJobChange provides access to sensitive information and ties up
+  # server resources (it's a blocking RAPI call), hence access is restricted.
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
 
   def GET(self):
-    """Returns a list of all nodes.
+    """Waits for job changes.
 
-    Example::
-
-      [
-        {
-          "id": "node1.example.com",
-          "uri": "\/instances\/node1.example.com"
-        },
-        {
-          "id": "node2.example.com",
-          "uri": "\/instances\/node2.example.com"
-        }
-      ]
+    """
+    job_id = self.items[0]
 
-    If the optional 'bulk' argument is provided and set to 'true'
-    value (i.e '?bulk=1'), the output contains detailed
-    information about nodes as a list.
-
-    Example::
-
-      [
-        {
-          "pinst_cnt": 1,
-          "mfree": 31280,
-          "mtotal": 32763,
-          "name": "www.example.com",
-          "tags": [],
-          "mnode": 512,
-          "dtotal": 5246208,
-          "sinst_cnt": 2,
-          "dfree": 5171712,
-          "offline": false
-        },
-        ...
-      ]
+    fields = self.getBodyParameter("fields")
+    prev_job_info = self.getBodyParameter("previous_job_info", None)
+    prev_log_serial = self.getBodyParameter("previous_log_serial", None)
+
+    if not isinstance(fields, list):
+      raise http.HttpBadRequest("The 'fields' parameter should be a list")
+
+    if not (prev_job_info is None or isinstance(prev_job_info, list)):
+      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
+                                " be a list")
+
+    if not (prev_log_serial is None or
+            isinstance(prev_log_serial, (int, long))):
+      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
+                                " be a number")
+
+    client = baserlib.GetClient()
+    result = client.WaitForJobChangeOnce(job_id, fields,
+                                         prev_job_info, prev_log_serial,
+                                         timeout=_WFJC_TIMEOUT)
+    if not result:
+      raise http.HttpNotFound()
+
+    if result == constants.JOB_NOTCHANGED:
+      # No changes
+      return None
+
+    (job_info, log_entries) = result
+
+    return {
+      "job_info": job_info,
+      "log_entries": log_entries,
+      }
+
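A hedged sketch of how a client could poll this resource, passing its parameters in the request body (the handler reads them via getBodyParameter() even though the method is GET). The rapi_request argument stands for a helper like the hypothetical one sketched near the top of the file; the log entry layout and the terminal status strings are assumptions based on ganeti's job status handling::

  def follow_job(rapi_request, job_id):
    """Print job log messages until the job reaches a final status."""
    prev_job_info = None
    prev_log_serial = None
    while True:
      result = rapi_request("GET", "/2/jobs/%s/wait" % job_id, {
        "fields": ["status"],
        "previous_job_info": prev_job_info,
        "previous_log_serial": prev_log_serial,
        })
      if result is None:
        # JOB_NOTCHANGED is serialized as null: nothing happened within
        # _WFJC_TIMEOUT seconds, so simply ask again
        continue
      prev_job_info = result["job_info"]
      for entry in result["log_entries"]:
        # assumed entry layout: (serial, timestamp, type, message)
        prev_log_serial = entry[0]
        print(entry[3])
      (status, ) = prev_job_info
      if status in ("success", "error", "canceled"):  # assumed final states
        return status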
+
+class R_2_nodes(baserlib.R_Generic):
+  """/2/nodes resource.
 
-    @return: a dictionary with 'name' and 'uri' keys for each of them
+  """
+  def GET(self):
+    """Returns a list of all nodes.
 
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
 
     if self.useBulk():
       bulkdata = client.QueryNodes([], N_FIELDS, False)
@@ -238,75 +287,199 @@ class R_2_nodes_name(baserlib.R_Generic):
   """/2/nodes/[node_name] resources.
 
   """
-  DOC_URI = "/nodes/[node_name]"
-
   def GET(self):
     """Send information about a node.
 
     """
     node_name = self.items[0]
-    client = luxi.Client()
+    client = baserlib.GetClient()
     result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                                use_locking=self.useLocking())
 
     return baserlib.MapFields(N_FIELDS, result[0])
 
 
-class R_2_instances(baserlib.R_Generic):
-  """/2/instances resource.
+class R_2_nodes_name_role(baserlib.R_Generic):
+  """ /2/nodes/[node_name]/role resource.
 
   """
-  DOC_URI = "/2/instances"
+  def GET(self):
+    """Returns the current node role.
+
+    @return: Node role
+
+    """
+    node_name = self.items[0]
+    client = baserlib.GetClient()
+    result = client.QueryNodes(names=[node_name], fields=["role"],
+                               use_locking=self.useLocking())
+
+    return _NR_MAP[result[0][0]]
+
+  def PUT(self):
+    """Sets the node role.
+
+    @return: a job id
+
+    """
+    if not isinstance(self.req.request_body, basestring):
+      raise http.HttpBadRequest("Invalid body contents, not a string")
+
+    node_name = self.items[0]
+    role = self.req.request_body
+
+    if role == _NR_REGULAR:
+      candidate = False
+      offline = False
+      drained = False
+
+    elif role == _NR_MASTER_CANDIATE:
+      candidate = True
+      offline = drained = None
+
+    elif role == _NR_DRAINED:
+      drained = True
+      candidate = offline = None
+
+    elif role == _NR_OFFLINE:
+      offline = True
+      candidate = drained = None
+
+    else:
+      raise http.HttpBadRequest("Can't set '%s' role" % role)
+
+    op = opcodes.OpSetNodeParams(node_name=node_name,
+                                 master_candidate=candidate,
+                                 offline=offline,
+                                 drained=drained,
+                                 force=bool(self.useForce()))
+
+    return baserlib.SubmitJob([op])
+
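For illustration, assuming a rapi_request() helper like the hypothetical one sketched near the top of the file, and assuming the force flag is passed as the usual ?force=1 query argument consumed by useForce()::

  # Read the current role; the reply is one of the _NR_* strings above.
  role = rapi_request("GET", "/2/nodes/node1.example.com/role")

  # Set a new role.  The request body is the bare role string (the handler
  # checks it with isinstance(..., basestring)).
  job_id = rapi_request("PUT", "/2/nodes/node1.example.com/role?force=1",
                        "offline")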
+
+class R_2_nodes_name_evacuate(baserlib.R_Generic):
+  """/2/nodes/[node_name]/evacuate resource.
+
+  """
+  def POST(self):
+    """Evacuate all secondary instances off a node.
+
+    """
+    node_name = self.items[0]
+    remote_node = self._checkStringVariable("remote_node", default=None)
+    iallocator = self._checkStringVariable("iallocator", default=None)
+
+    op = opcodes.OpEvacuateNode(node_name=node_name,
+                                remote_node=remote_node,
+                                iallocator=iallocator)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_migrate(baserlib.R_Generic):
+  """/2/nodes/[node_name]/migrate resource.
+
+  """
+  def POST(self):
+    """Migrate all primary instances from a node.
+
+    """
+    node_name = self.items[0]
+    live = bool(self._checkIntVariable("live", default=1))
+
+    op = opcodes.OpMigrateNode(node_name=node_name, live=live)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_storage(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage ressource.
+
+  """
+  # LUQueryNodeStorage acquires locks, hence access to GET is restricted
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
 
   def GET(self):
-    """Returns a list of all available instances.
+    node_name = self.items[0]
 
+    storage_type = self._checkStringVariable("storage_type", None)
+    if not storage_type:
+      raise http.HttpBadRequest("Missing the required 'storage_type'"
+                                " parameter")
 
-    Example::
+    output_fields = self._checkStringVariable("output_fields", None)
+    if not output_fields:
+      raise http.HttpBadRequest("Missing the required 'output_fields'"
+                                " parameter")
 
-      [
-        {
-          "name": "web.example.com",
-          "uri": "\/instances\/web.example.com"
-        },
-        {
-          "name": "mail.example.com",
-          "uri": "\/instances\/mail.example.com"
-        }
-      ]
+    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
+                                    storage_type=storage_type,
+                                    output_fields=output_fields.split(","))
+    return baserlib.SubmitJob([op])
 
-    If the optional 'bulk' argument is provided and set to 'true'
-    value (i.e '?bulk=1'), the output contains detailed
-    information about instances as a list.
-
-    Example::
-
-      [
-        {
-           "status": "running",
-           "bridge": "xen-br0",
-           "name": "web.example.com",
-           "tags": ["tag1", "tag2"],
-           "admin_ram": 512,
-           "sda_size": 20480,
-           "pnode": "node1.example.com",
-           "mac": "01:23:45:67:89:01",
-           "sdb_size": 4096,
-           "snodes": ["node2.example.com"],
-           "disk_template": "drbd",
-           "ip": null,
-           "admin_state": true,
-           "os": "debian-etch",
-           "vcpus": 2,
-           "oper_state": true
-        },
-        ...
-      ]
 
-    @returns: a dictionary with 'name' and 'uri' keys for each of them.
+class R_2_nodes_name_storage_modify(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage/modify ressource.
+
+  """
+  def PUT(self):
+    node_name = self.items[0]
+
+    storage_type = self._checkStringVariable("storage_type", None)
+    if not storage_type:
+      raise http.HttpBadRequest("Missing the required 'storage_type'"
+                                " parameter")
+
+    name = self._checkStringVariable("name", None)
+    if not name:
+      raise http.HttpBadRequest("Missing the required 'name'"
+                                " parameter")
+
+    changes = {}
+
+    if "allocatable" in self.queryargs:
+      changes[constants.SF_ALLOCATABLE] = \
+        bool(self._checkIntVariable("allocatable", default=1))
+
+    op = opcodes.OpModifyNodeStorage(node_name=node_name,
+                                     storage_type=storage_type,
+                                     name=name,
+                                     changes=changes)
+    return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_storage_repair(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage/repair ressource.
+
+  """
+  def PUT(self):
+    node_name = self.items[0]
+
+    storage_type = self._checkStringVariable("storage_type", None)
+    if not storage_type:
+      raise http.HttpBadRequest("Missing the required 'storage_type'"
+                                " parameter")
+
+    name = self._checkStringVariable("name", None)
+    if not name:
+      raise http.HttpBadRequest("Missing the required 'name'"
+                                " parameter")
+
+    op = opcodes.OpRepairNodeStorage(node_name=node_name,
+                                     storage_type=storage_type,
+                                     name=name)
+    return baserlib.SubmitJob([op])
+
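A hedged illustration of the query arguments the three storage resources above expect; the paths and parameter names come from the handlers, while the storage type, output fields and volume name are made-up example values (rapi_request() is the hypothetical helper sketched near the top of the file)::

  # List storage units on a node (GET access is restricted, see above).
  rapi_request("GET",
               "/2/nodes/node1.example.com/storage"
               "?storage_type=lvm-vg&output_fields=name,size,allocatable")

  # Mark a storage unit as not allocatable.
  rapi_request("PUT",
               "/2/nodes/node1.example.com/storage/modify"
               "?storage_type=lvm-vg&name=xenvg&allocatable=0")

  # Repair a storage unit.
  rapi_request("PUT",
               "/2/nodes/node1.example.com/storage/repair"
               "?storage_type=lvm-vg&name=xenvg")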
+
+class R_2_instances(baserlib.R_Generic):
+  """/2/instances resource.
+
+  """
+  def GET(self):
+    """Returns a list of all available instances.
 
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
 
     use_locking = self.useLocking()
     if self.useBulk():
@@ -321,7 +494,7 @@ class R_2_instances(baserlib.R_Generic):
   def POST(self):
     """Create an instance.
 
-    @returns: a job id
+    @return: a job id
 
     """
     if not isinstance(self.req.request_body, dict):
@@ -341,48 +514,53 @@ class R_2_instances(baserlib.R_Generic):
     for idx, d in enumerate(disk_data):
       if not isinstance(d, int):
         raise http.HttpBadRequest("Disk %d specification wrong: should"
-                                  " be an integer")
+                                  " be an integer" % idx)
       disks.append({"size": d})
     # nic processing (one nic only)
-    nics = [{"mac": fn("mac", constants.VALUE_AUTO),
-             "ip": fn("ip", None),
-             "bridge": fn("bridge", None)}]
-
-    op = ganeti.opcodes.OpCreateInstance(
-        mode=constants.INSTANCE_CREATE,
-        instance_name=fn('name'),
-        disks=disks,
-        disk_template=fn('disk_template'),
-        os_type=fn('os'),
-        pnode=fn('pnode', None),
-        snode=fn('snode', None),
-        iallocator=fn('iallocator', None),
-        nics=nics,
-        start=fn('start', True),
-        ip_check=fn('ip_check', True),
-        wait_for_sync=True,
-        hypervisor=fn('hypervisor', None),
-        hvparams=hvparams,
-        beparams=beparams,
-        file_storage_dir=fn('file_storage_dir', None),
-        file_driver=fn('file_driver', 'loop'),
-        )
-
-    job_id = ganeti.cli.SendJob([op])
-    return job_id
+    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
+    if fn("ip", None) is not None:
+      nics[0]["ip"] = fn("ip")
+    if fn("mode", None) is not None:
+      nics[0]["mode"] = fn("mode")
+    if fn("link", None) is not None:
+      nics[0]["link"] = fn("link")
+    if fn("bridge", None) is not None:
+      nics[0]["bridge"] = fn("bridge")
+
+    op = opcodes.OpCreateInstance(
+      mode=constants.INSTANCE_CREATE,
+      instance_name=fn('name'),
+      disks=disks,
+      disk_template=fn('disk_template'),
+      os_type=fn('os'),
+      pnode=fn('pnode', None),
+      snode=fn('snode', None),
+      iallocator=fn('iallocator', None),
+      nics=nics,
+      start=fn('start', True),
+      ip_check=fn('ip_check', True),
+      name_check=fn('name_check', True),
+      wait_for_sync=True,
+      hypervisor=fn('hypervisor', None),
+      hvparams=hvparams,
+      beparams=beparams,
+      file_storage_dir=fn('file_storage_dir', None),
+      file_driver=fn('file_driver', 'loop'),
+      dry_run=bool(self.dryRun()),
+      )
+
+    return baserlib.SubmitJob([op])
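A hedged example of a request body for instance creation. Only parameters the handler above reads are shown; every value, the NIC mode string and the beparams key and layout are illustrative assumptions (rapi_request() as sketched near the top of the file)::

  body = {
    "name": "web.example.com",
    "disk_template": "drbd",
    "os": "debian-etch",
    "pnode": "node1.example.com",
    "snode": "node2.example.com",
    "disks": [20480, 4096],          # one integer size per disk
    "mode": "bridged",               # assumed NIC mode value
    "link": "xen-br0",
    "start": True,
    "ip_check": True,
    "name_check": True,
    "hypervisor": "xen-pvm",
    "beparams": {"memory": 512, "vcpus": 2},   # assumed key and layout
    }
  job_id = rapi_request("POST", "/2/instances", body)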
 
 
 class R_2_instances_name(baserlib.R_Generic):
   """/2/instances/[instance_name] resources.
 
   """
-  DOC_URI = "/2/instances/[instance_name]"
-
   def GET(self):
     """Send information about an instance.
 
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
     instance_name = self.items[0]
     result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                    use_locking=self.useLocking())
@@ -393,10 +571,26 @@ class R_2_instances_name(baserlib.R_Generic):
     """Delete an instance.
 
     """
-    op = ganeti.opcodes.OpRemoveInstance(instance_name=self.items[0],
-                                         ignore_failures=False)
-    job_id = ganeti.cli.SendJob([op])
-    return job_id
+    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
+                                  ignore_failures=False,
+                                  dry_run=bool(self.dryRun()))
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_info(baserlib.R_Generic):
+  """/2/instances/[instance_name]/info resource.
+
+  """
+  def GET(self):
+    """Request detailed instance information.
+
+    """
+    instance_name = self.items[0]
+    static = bool(self._checkIntVariable("static", default=0))
+
+    op = opcodes.OpQueryInstanceData(instances=[instance_name],
+                                     static=static)
+    return baserlib.SubmitJob([op])
 
 
 class R_2_instances_name_reboot(baserlib.R_Generic):
@@ -405,9 +599,6 @@ class R_2_instances_name_reboot(baserlib.R_Generic):
   Implements an instance reboot.
 
   """
-
-  DOC_URI = "/2/instances/[instance_name]/reboot"
-
   def POST(self):
     """Reboot an instance.
 
@@ -418,16 +609,13 @@ class R_2_instances_name_reboot(baserlib.R_Generic):
     instance_name = self.items[0]
     reboot_type = self.queryargs.get('type',
                                      [constants.INSTANCE_REBOOT_HARD])[0]
-    ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
-                                                 [False])[0])
-    op = ganeti.opcodes.OpRebootInstance(
-        instance_name=instance_name,
-        reboot_type=reboot_type,
-        ignore_secondaries=ignore_secondaries)
+    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
+    op = opcodes.OpRebootInstance(instance_name=instance_name,
+                                  reboot_type=reboot_type,
+                                  ignore_secondaries=ignore_secondaries,
+                                  dry_run=bool(self.dryRun()))
 
-    job_id = ganeti.cli.SendJob([op])
-
-    return job_id
+    return baserlib.SubmitJob([op])
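For illustration, the reboot handler above takes its arguments from the query string; the reboot type string is assumed to match ganeti's usual reboot constants (rapi_request() as sketched near the top of the file)::

  # 'type' defaults to a hard reboot when omitted (see the handler above).
  rapi_request("POST",
               "/2/instances/web.example.com/reboot"
               "?type=soft&ignore_secondaries=0")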
 
 
 class R_2_instances_name_startup(baserlib.R_Generic):
@@ -436,9 +624,6 @@ class R_2_instances_name_startup(baserlib.R_Generic):
   Implements an instance startup.
 
   """
-
-  DOC_URI = "/2/instances/[instance_name]/startup"
-
   def PUT(self):
     """Startup an instance.
 
@@ -447,13 +632,12 @@ class R_2_instances_name_startup(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    force_startup = bool(self.queryargs.get('force', [False])[0])
-    op = ganeti.opcodes.OpStartupInstance(instance_name=instance_name,
-                                          force=force_startup)
+    force_startup = bool(self._checkIntVariable('force'))
+    op = opcodes.OpStartupInstance(instance_name=instance_name,
+                                   force=force_startup,
+                                   dry_run=bool(self.dryRun()))
 
-    job_id = ganeti.cli.SendJob([op])
-
-    return job_id
+    return baserlib.SubmitJob([op])
 
 
 class R_2_instances_name_shutdown(baserlib.R_Generic):
@@ -462,25 +646,113 @@ class R_2_instances_name_shutdown(baserlib.R_Generic):
   Implements an instance shutdown.
 
   """
+  def PUT(self):
+    """Shutdown an instance.
+
+    """
+    instance_name = self.items[0]
+    op = opcodes.OpShutdownInstance(instance_name=instance_name,
+                                    dry_run=bool(self.dryRun()))
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_reinstall(baserlib.R_Generic):
+  """/2/instances/[instance_name]/reinstall resource.
+
+  Implements an instance reinstall.
+
+  """
+  def POST(self):
+    """Reinstall an instance.
+
+    The URI takes os=name and nostartup=[0|1] optional
+    parameters. By default, the instance will be started
+    automatically.
+
+    """
+    instance_name = self.items[0]
+    ostype = self._checkStringVariable('os')
+    nostartup = self._checkIntVariable('nostartup')
+    ops = [
+      opcodes.OpShutdownInstance(instance_name=instance_name),
+      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
+      ]
+    if not nostartup:
+      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
+                                           force=False))
+    return baserlib.SubmitJob(ops)
+
+
+class R_2_instances_name_replace_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/replace-disks resource.
+
+  """
+  def POST(self):
+    """Replaces disks on an instance.
+
+    """
+    instance_name = self.items[0]
+    remote_node = self._checkStringVariable("remote_node", default=None)
+    mode = self._checkStringVariable("mode", default=None)
+    raw_disks = self._checkStringVariable("disks", default=None)
+    iallocator = self._checkStringVariable("iallocator", default=None)
+
+    if raw_disks:
+      try:
+        disks = [int(part) for part in raw_disks.split(",")]
+      except ValueError, err:
+        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+    else:
+      disks = []
+
+    op = opcodes.OpReplaceDisks(instance_name=instance_name,
+                                remote_node=remote_node,
+                                mode=mode,
+                                disks=disks,
+                                iallocator=iallocator)
+
+    return baserlib.SubmitJob([op])
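A hedged example for the replace-disks resource above; the mode string is assumed to be one of ganeti's replace-disks modes, and 'disks' is the comma-separated list of disk indices parsed by the handler (rapi_request() as sketched near the top of the file)::

  rapi_request("POST",
               "/2/instances/web.example.com/replace-disks"
               "?mode=replace_new_secondary&disks=0,1"
               "&remote_node=node3.example.com")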
 
-  DOC_URI = "/2/instances/[instance_name]/shutdown"
 
+class R_2_instances_name_activate_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/activate-disks resource.
+
+  """
   def PUT(self):
-    """Shutdown an instance.
+    """Activate disks for an instance.
+
+    The URI might contain ignore_size to ignore the currently recorded size.
+
+    """
+    instance_name = self.items[0]
+    ignore_size = bool(self._checkIntVariable('ignore_size'))
+
+    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
+                                         ignore_size=ignore_size)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/deactivate-disks resource.
+
+  """
+  def PUT(self):
+    """Deactivate disks for an instance.
 
     """
     instance_name = self.items[0]
-    op = ganeti.opcodes.OpShutdownInstance(instance_name=instance_name)
 
-    job_id = ganeti.cli.SendJob([op])
+    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
 
-    return job_id
+    return baserlib.SubmitJob([op])
 
 
 class _R_Tags(baserlib.R_Generic):
   """ Quasiclass for tagging resources
 
-  Manages tags. Inheriting this class you suppose to define DOC_URI and
+  Manages tags. When inheriting this class you must define the
   TAG_LEVEL for it.
 
   """
@@ -505,6 +777,7 @@ class _R_Tags(baserlib.R_Generic):
     Example: ["tag1", "tag2", "tag3"]
 
     """
+    # pylint: disable-msg=W0212
     return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
 
   def PUT(self):
@@ -514,11 +787,13 @@ class _R_Tags(baserlib.R_Generic):
     you'll have back a job id.
 
     """
+    # pylint: disable-msg=W0212
     if 'tag' not in self.queryargs:
       raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                 " the 'tag' parameter")
     return baserlib._Tags_PUT(self.TAG_LEVEL,
-                              self.queryargs['tag'], name=self.name)
+                              self.queryargs['tag'], name=self.name,
+                              dry_run=bool(self.dryRun()))
 
   def DELETE(self):
     """Delete a tag.
@@ -528,13 +803,15 @@ class _R_Tags(baserlib.R_Generic):
     /tags?tag=[tag]&tag=[tag]
 
     """
+    # pylint: disable-msg=W0212
     if 'tag' not in self.queryargs:
       # no, we are not going to delete all tags
       raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                 " tag(s) using the 'tag' parameter")
     return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                  self.queryargs['tag'],
-                                 name=self.name)
+                                 name=self.name,
+                                 dry_run=bool(self.dryRun()))
 
 
 class R_2_instances_name_tags(_R_Tags):
@@ -543,7 +820,6 @@ class R_2_instances_name_tags(_R_Tags):
   Manages per-instance tags.
 
   """
-  DOC_URI = "/2/instances/[instance_name]/tags"
   TAG_LEVEL = constants.TAG_INSTANCE
 
 
@@ -553,7 +829,6 @@ class R_2_nodes_name_tags(_R_Tags):
   Manages per-node tags.
 
   """
-  DOC_URI = "/2/nodes/[node_name]/tags"
   TAG_LEVEL = constants.TAG_NODE
 
 
@@ -563,5 +838,4 @@ class R_2_tags(_R_Tags):
   Manages cluster tags.
 
   """
-  DOC_URI = "/2/tags"
   TAG_LEVEL = constants.TAG_CLUSTER