RAPI: Allow waiting for job changes
[ganeti-local] / lib / rapi / rlib2.py
index 3e52085..2dffc52 100644 (file)
 
 """Remote API version 2 baserlib.library.
 
+  PUT or POST?
+  ============
+
+  According to RFC 2616, the main difference between PUT and POST is that
+  POST can create new resources, while PUT may only create (or replace) the
+  resource that the request URI points to.
+
+  In the context of this module, creating an instance via POST on
+  /2/instances is legitimate, while PUT would not be, because the request
+  creates a new entity instead of just replacing /2/instances with it.
+
+  So when adding new methods, if they operate on the URI entity itself,
+  PUT should be preferred over POST.
+
 """
 
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
 from ganeti import opcodes
 from ganeti import http
 from ganeti import constants
 from ganeti import cli
+from ganeti import rapi
 from ganeti.rapi import baserlib
 
 
-
+_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
 I_FIELDS = ["name", "admin_state", "os",
             "pnode", "snodes",
             "disk_template",
-            "nic.ips", "nic.macs", "nic.modes", "nic.links",
+            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
             "network_port",
             "disk.sizes", "disk_usage",
             "beparams", "hvparams",
             "oper_state", "oper_ram", "status",
-            "tags"]
+            ] + _COMMON_FIELDS
 
 N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "dtotal", "dfree",
             "mtotal", "mnode", "mfree",
-            "pinst_cnt", "sinst_cnt", "tags",
+            "pinst_cnt", "sinst_cnt",
             "ctotal", "cnodes", "csockets",
-            ]
+            "pip", "sip", "role",
+            "pinst_list", "sinst_list",
+            ] + _COMMON_FIELDS
+
+_NR_DRAINED = "drained"
+_NR_MASTER_CANDIDATE = "master-candidate"
+_NR_MASTER = "master"
+_NR_OFFLINE = "offline"
+_NR_REGULAR = "regular"
+
+_NR_MAP = {
+  "M": _NR_MASTER,
+  "C": _NR_MASTER_CANDIATE,
+  "D": _NR_DRAINED,
+  "O": _NR_OFFLINE,
+  "R": _NR_REGULAR,
+  }
+
+# Timeout for /2/jobs/[job_id]/wait. Gives a job up to 10 seconds to change.
+_WFJC_TIMEOUT = 10
 
 
 class R_version(baserlib.R_Generic):
@@ -56,7 +94,8 @@ class R_version(baserlib.R_Generic):
   to adapt clients accordingly.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns the remote API version.
 
     """
@@ -67,7 +106,8 @@ class R_2_info(baserlib.R_Generic):
   """Cluster info.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns cluster information.
 
     """
@@ -79,7 +119,8 @@ class R_2_os(baserlib.R_Generic):
   """/2/os resource.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Return a list of all OSes.
 
     Can return error 500 in case of a problem.
@@ -88,7 +129,8 @@ class R_2_os(baserlib.R_Generic):
 
     """
     cl = baserlib.GetClient()
-    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
+    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
+                              names=[])
     job_id = baserlib.SubmitJob([op], cl)
     # we use custom feedback function, instead of print we log the status
     result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
@@ -97,14 +139,32 @@ class R_2_os(baserlib.R_Generic):
     if not isinstance(diagnose_data, list):
       raise http.HttpBadGateway(message="Can't get OS list")
 
-    return [row[0] for row in diagnose_data if row[1]]
+    os_names = []
+    for (name, valid, variants) in diagnose_data:
+      if valid:
+        os_names.extend(cli.CalculateOSNames(name, variants))
+
+    return os_names
+
+
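
The variant handling above relies on cli.CalculateOSNames; a rough standalone
approximation of the expected behaviour (an assumption, not the actual
implementation) would be:

# Hypothetical stand-in for cli.CalculateOSNames: an OS with variants is listed
# once per variant as "name+variant", an OS without variants under its plain name.
def expand_os_names(name, variants):
  if variants:
    return ["%s+%s" % (name, v) for v in variants]
  return [name]

diagnose_data = [("debootstrap", True, ["default", "minimal"]),
                 ("busybox", True, []),
                 ("broken-os", False, [])]

os_names = []
for (name, valid, variants) in diagnose_data:
  if valid:
    os_names.extend(expand_os_names(name, variants))

print os_names  # ['debootstrap+default', 'debootstrap+minimal', 'busybox']
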
+class R_2_redist_config(baserlib.R_Generic):
+  """/2/redistribute-config resource.
+
+  """
+  @staticmethod
+  def PUT():
+    """Redistribute configuration to all nodes.
+
+    """
+    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
 
 
 class R_2_jobs(baserlib.R_Generic):
   """/2/jobs resource.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns a dictionary of jobs.
 
     @return: a dictionary with jobs id and uri.
@@ -154,6 +214,55 @@ class R_2_jobs_id(baserlib.R_Generic):
     return result
 
 
+class R_2_jobs_id_wait(baserlib.R_Generic):
+  """/2/jobs/[job_id]/wait resource.
+
+  """
+  # WaitForJobChange provides access to sensitive information and blocks
+  # machine resources (it's a blocking RAPI call), hence restricting access.
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def GET(self):
+    """Waits for job changes.
+
+    """
+    job_id = self.items[0]
+
+    fields = self.getBodyParameter("fields")
+    prev_job_info = self.getBodyParameter("previous_job_info", None)
+    prev_log_serial = self.getBodyParameter("previous_log_serial", None)
+
+    if not isinstance(fields, list):
+      raise http.HttpBadRequest("The 'fields' parameter should be a list")
+
+    if not (prev_job_info is None or isinstance(prev_job_info, list)):
+      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
+                                " be a list")
+
+    if not (prev_log_serial is None or
+            isinstance(prev_log_serial, (int, long))):
+      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
+                                " be a number")
+
+    client = baserlib.GetClient()
+    result = client.WaitForJobChangeOnce(job_id, fields,
+                                         prev_job_info, prev_log_serial,
+                                         timeout=_WFJC_TIMEOUT)
+    if not result:
+      raise http.HttpNotFound()
+
+    if result == constants.JOB_NOTCHANGED:
+      # No changes
+      return None
+
+    (job_info, log_entries) = result
+
+    return {
+      "job_info": job_info,
+      "log_entries": log_entries,
+      }
+
+
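
A client-side polling sketch for the new wait resource (not part of the patch;
it assumes the requests library, placeholder host and credentials, and that the
server accepts a JSON request body on GET, as the getBodyParameter calls above
imply):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials with write access

def wait_for_job(job_id):
  prev_info = None
  while True:
    body = {"fields": ["status"], "previous_job_info": prev_info}
    resp = requests.request("GET", "%s/2/jobs/%s/wait" % (RAPI, job_id),
                            json=body, auth=AUTH, verify=False)
    resp.raise_for_status()            # 404 means the job is gone
    result = resp.json()
    if result is None:
      continue                         # no change within ~10s, poll again
    prev_info = result["job_info"]     # one value per requested field
    if prev_info[0] in ("success", "error", "canceled"):  # assumed final states
      return prev_info[0]

Each request blocks on the server for up to _WFJC_TIMEOUT seconds, so the client
can loop without busy-waiting.
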
 class R_2_nodes(baserlib.R_Generic):
   """/2/nodes resource.
 
@@ -190,6 +299,178 @@ class R_2_nodes_name(baserlib.R_Generic):
     return baserlib.MapFields(N_FIELDS, result[0])
 
 
+class R_2_nodes_name_role(baserlib.R_Generic):
+  """ /2/nodes/[node_name]/role resource.
+
+  """
+  def GET(self):
+    """Returns the current node role.
+
+    @return: Node role
+
+    """
+    node_name = self.items[0]
+    client = baserlib.GetClient()
+    result = client.QueryNodes(names=[node_name], fields=["role"],
+                               use_locking=self.useLocking())
+
+    return _NR_MAP[result[0][0]]
+
+  def PUT(self):
+    """Sets the node role.
+
+    @return: a job id
+
+    """
+    if not isinstance(self.req.request_body, basestring):
+      raise http.HttpBadRequest("Invalid body contents, not a string")
+
+    node_name = self.items[0]
+    role = self.req.request_body
+
+    if role == _NR_REGULAR:
+      candidate = False
+      offline = False
+      drained = False
+
+    elif role == _NR_MASTER_CANDIDATE:
+      candidate = True
+      offline = drained = None
+
+    elif role == _NR_DRAINED:
+      drained = True
+      candidate = offline = None
+
+    elif role == _NR_OFFLINE:
+      offline = True
+      candidate = drained = None
+
+    else:
+      raise http.HttpBadRequest("Can't set '%s' role" % role)
+
+    op = opcodes.OpSetNodeParams(node_name=node_name,
+                                 master_candidate=candidate,
+                                 offline=offline,
+                                 drained=drained,
+                                 force=bool(self.useForce()))
+
+    return baserlib.SubmitJob([op])
+
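
A hedged usage sketch for the role resource (not part of the patch; host,
credentials and the requests library are placeholders). The PUT body is a single
JSON-encoded role string, mirroring the basestring check above:

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials

node = "node2.example.com"

# GET returns one of "master", "master-candidate", "drained", "offline", "regular".
print requests.get("%s/2/nodes/%s/role" % (RAPI, node),
                   auth=AUTH, verify=False).json()

# PUT sets a new role and returns a job id.
job_id = requests.put("%s/2/nodes/%s/role" % (RAPI, node),
                      json="drained", auth=AUTH, verify=False).json()
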
+
+class R_2_nodes_name_evacuate(baserlib.R_Generic):
+  """/2/nodes/[node_name]/evacuate resource.
+
+  """
+  def POST(self):
+    """Evacuate all secondary instances off a node.
+
+    """
+    node_name = self.items[0]
+    remote_node = self._checkStringVariable("remote_node", default=None)
+    iallocator = self._checkStringVariable("iallocator", default=None)
+
+    op = opcodes.OpEvacuateNode(node_name=node_name,
+                                remote_node=remote_node,
+                                iallocator=iallocator)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_migrate(baserlib.R_Generic):
+  """/2/nodes/[node_name]/migrate resource.
+
+  """
+  def POST(self):
+    """Migrate all primary instances from a node.
+
+    """
+    node_name = self.items[0]
+    live = bool(self._checkIntVariable("live", default=1))
+
+    op = opcodes.OpMigrateNode(node_name=node_name, live=live)
+
+    return baserlib.SubmitJob([op])
+
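
Both node operations take their options as query arguments; a hedged sketch
(not part of the patch, placeholder names throughout):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials

node = "node2.example.com"

# Evacuate secondaries, letting an instance allocator pick the new nodes.
requests.post("%s/2/nodes/%s/evacuate" % (RAPI, node),
              params={"iallocator": "my-allocator"},  # placeholder allocator name
              auth=AUTH, verify=False)

# Migrate primaries non-live ("live" defaults to 1).
requests.post("%s/2/nodes/%s/migrate" % (RAPI, node),
              params={"live": 0}, auth=AUTH, verify=False)
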
+
+class R_2_nodes_name_storage(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage ressource.
+
+  """
+  # LUQueryNodeStorage acquires locks, hence restricting access to GET
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def GET(self):
+    node_name = self.items[0]
+
+    storage_type = self._checkStringVariable("storage_type", None)
+    if not storage_type:
+      raise http.HttpBadRequest("Missing the required 'storage_type'"
+                                " parameter")
+
+    output_fields = self._checkStringVariable("output_fields", None)
+    if not output_fields:
+      raise http.HttpBadRequest("Missing the required 'output_fields'"
+                                " parameter")
+
+    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
+                                    storage_type=storage_type,
+                                    output_fields=output_fields.split(","))
+    return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_storage_modify(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage/modify ressource.
+
+  """
+  def PUT(self):
+    node_name = self.items[0]
+
+    storage_type = self._checkStringVariable("storage_type", None)
+    if not storage_type:
+      raise http.HttpBadRequest("Missing the required 'storage_type'"
+                                " parameter")
+
+    name = self._checkStringVariable("name", None)
+    if not name:
+      raise http.HttpBadRequest("Missing the required 'name'"
+                                " parameter")
+
+    changes = {}
+
+    if "allocatable" in self.queryargs:
+      changes[constants.SF_ALLOCATABLE] = \
+        bool(self._checkIntVariable("allocatable", default=1))
+
+    op = opcodes.OpModifyNodeStorage(node_name=node_name,
+                                     storage_type=storage_type,
+                                     name=name,
+                                     changes=changes)
+    return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_storage_repair(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage/repair ressource.
+
+  """
+  def PUT(self):
+    node_name = self.items[0]
+
+    storage_type = self._checkStringVariable("storage_type", None)
+    if not storage_type:
+      raise http.HttpBadRequest("Missing the required 'storage_type'"
+                                " parameter")
+
+    name = self._checkStringVariable("name", None)
+    if not name:
+      raise http.HttpBadRequest("Missing the required 'name'"
+                                " parameter")
+
+    op = opcodes.OpRepairNodeStorage(node_name=node_name,
+                                     storage_type=storage_type,
+                                     name=name)
+    return baserlib.SubmitJob([op])
+
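
A hedged sketch of the three storage resources (not part of the patch; the
storage type strings, field names and device names are assumptions):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials (GET needs write access)

node = "node1.example.com"

# Query storage units; both storage_type and output_fields are required.
requests.get("%s/2/nodes/%s/storage" % (RAPI, node),
             params={"storage_type": "lvm-vg",
                     "output_fields": "name,size,free"},
             auth=AUTH, verify=False)

# Mark a physical volume as non-allocatable.
requests.put("%s/2/nodes/%s/storage/modify" % (RAPI, node),
             params={"storage_type": "lvm-pv", "name": "/dev/sdb1",
                     "allocatable": 0},
             auth=AUTH, verify=False)

# Ask for a repair of a volume group.
requests.put("%s/2/nodes/%s/storage/repair" % (RAPI, node),
             params={"storage_type": "lvm-vg", "name": "xenvg"},
             auth=AUTH, verify=False)
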
+
 class R_2_instances(baserlib.R_Generic):
   """/2/instances resource.
 
@@ -233,7 +514,7 @@ class R_2_instances(baserlib.R_Generic):
     for idx, d in enumerate(disk_data):
       if not isinstance(d, int):
         raise http.HttpBadRequest("Disk %d specification wrong: should"
-                                  " be an integer")
+                                  " be an integer" % idx)
       disks.append({"size": d})
     # nic processing (one nic only)
     nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
@@ -244,7 +525,7 @@ class R_2_instances(baserlib.R_Generic):
     if fn("link", None) is not None:
       nics[0]["link"] = fn("link")
     if fn("bridge", None) is not None:
-       nics[0]["bridge"] = fn("bridge")
+      nics[0]["bridge"] = fn("bridge")
 
     op = opcodes.OpCreateInstance(
       mode=constants.INSTANCE_CREATE,
@@ -258,12 +539,14 @@ class R_2_instances(baserlib.R_Generic):
       nics=nics,
       start=fn('start', True),
       ip_check=fn('ip_check', True),
+      name_check=fn('name_check', True),
       wait_for_sync=True,
       hypervisor=fn('hypervisor', None),
       hvparams=hvparams,
       beparams=beparams,
       file_storage_dir=fn('file_storage_dir', None),
       file_driver=fn('file_driver', 'loop'),
+      dry_run=bool(self.dryRun()),
       )
 
     return baserlib.SubmitJob([op])
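
An illustrative creation request matching the parsing above (not part of the
patch; body field names beyond those visible in this hunk, such as name, os,
pnode and disk_template, are assumptions, and dry-run only validates the
request):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials

body = {"name": "inst1.example.com",       # assumed body field names
        "os": "debootstrap",
        "pnode": "node1.example.com",
        "disk_template": "plain",
        "disks": [10240],                  # one disk, size in MiB
        "start": True,
        "ip_check": False,
        "name_check": False}
job_id = requests.post(RAPI + "/2/instances", json=body,
                       params={"dry-run": 1}, auth=AUTH, verify=False).json()
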
@@ -289,7 +572,24 @@ class R_2_instances_name(baserlib.R_Generic):
 
     """
     op = opcodes.OpRemoveInstance(instance_name=self.items[0],
-                                  ignore_failures=False)
+                                  ignore_failures=False,
+                                  dry_run=bool(self.dryRun()))
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_info(baserlib.R_Generic):
+  """/2/instances/[instance_name]/info resource.
+
+  """
+  def GET(self):
+    """Request detailed instance information.
+
+    """
+    instance_name = self.items[0]
+    static = bool(self._checkIntVariable("static", default=0))
+
+    op = opcodes.OpQueryInstanceData(instances=[instance_name],
+                                     static=static)
     return baserlib.SubmitJob([op])
 
 
@@ -309,11 +609,11 @@ class R_2_instances_name_reboot(baserlib.R_Generic):
     instance_name = self.items[0]
     reboot_type = self.queryargs.get('type',
                                      [constants.INSTANCE_REBOOT_HARD])[0]
-    ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
-                                                 [False])[0])
+    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
     op = opcodes.OpRebootInstance(instance_name=instance_name,
                                   reboot_type=reboot_type,
-                                  ignore_secondaries=ignore_secondaries)
+                                  ignore_secondaries=ignore_secondaries,
+                                  dry_run=bool(self.dryRun()))
 
     return baserlib.SubmitJob([op])
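
With the switch to _checkIntVariable, boolean query arguments are given as 0/1
integers; a hedged request sketch (not part of the patch; placeholder host and
credentials, and the dry-run argument name is assumed from baserlib's dryRun
helper):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials

# Hard reboot, ignoring secondaries, as a dry run only.
requests.post(RAPI + "/2/instances/inst1.example.com/reboot",
              params={"type": "hard", "ignore_secondaries": 1, "dry-run": 1},
              auth=AUTH, verify=False)
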
 
@@ -332,9 +632,10 @@ class R_2_instances_name_startup(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    force_startup = bool(self.queryargs.get('force', [False])[0])
+    force_startup = bool(self._checkIntVariable('force'))
     op = opcodes.OpStartupInstance(instance_name=instance_name,
-                                   force=force_startup)
+                                   force=force_startup,
+                                   dry_run=bool(self.dryRun()))
 
     return baserlib.SubmitJob([op])
 
@@ -350,7 +651,100 @@ class R_2_instances_name_shutdown(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    op = opcodes.OpShutdownInstance(instance_name=instance_name)
+    op = opcodes.OpShutdownInstance(instance_name=instance_name,
+                                    dry_run=bool(self.dryRun()))
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_reinstall(baserlib.R_Generic):
+  """/2/instances/[instance_name]/reinstall resource.
+
+  Implements an instance reinstall.
+
+  """
+  def POST(self):
+    """Reinstall an instance.
+
+    The URI takes the optional os=name and nostartup=[0|1] parameters. By
+    default, the instance will be started automatically.
+
+    """
+    instance_name = self.items[0]
+    ostype = self._checkStringVariable('os')
+    nostartup = self._checkIntVariable('nostartup')
+    ops = [
+      opcodes.OpShutdownInstance(instance_name=instance_name),
+      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
+      ]
+    if not nostartup:
+      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
+                                           force=False))
+    return baserlib.SubmitJob(ops)
+
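
The reinstall resource expands into a shutdown/reinstall(/startup) job; a hedged
client call (not part of the patch, placeholder names):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials

# Reinstall with a different OS and keep the instance shut down afterwards.
requests.post(RAPI + "/2/instances/inst1.example.com/reinstall",
              params={"os": "debootstrap", "nostartup": 1},
              auth=AUTH, verify=False)
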
+
+class R_2_instances_name_replace_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/replace-disks resource.
+
+  """
+  def POST(self):
+    """Replaces disks on an instance.
+
+    """
+    instance_name = self.items[0]
+    remote_node = self._checkStringVariable("remote_node", default=None)
+    mode = self._checkStringVariable("mode", default=None)
+    raw_disks = self._checkStringVariable("disks", default=None)
+    iallocator = self._checkStringVariable("iallocator", default=None)
+
+    if raw_disks:
+      try:
+        disks = [int(part) for part in raw_disks.split(",")]
+      except ValueError, err:
+        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+    else:
+      disks = []
+
+    op = opcodes.OpReplaceDisks(instance_name=instance_name,
+                                remote_node=remote_node,
+                                mode=mode,
+                                disks=disks,
+                                iallocator=iallocator)
+
+    return baserlib.SubmitJob([op])
+
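
The disks argument is a comma-separated list of disk indices; a hedged sketch
(not part of the patch; the mode string is an assumption based on the usual
OpReplaceDisks modes):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials

# Replace disks 0 and 1 on the secondary node.
requests.post(RAPI + "/2/instances/inst1.example.com/replace-disks",
              params={"mode": "replace_on_secondary", "disks": "0,1"},
              auth=AUTH, verify=False)
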
+
+class R_2_instances_name_activate_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/activate-disks resource.
+
+  """
+  def PUT(self):
+    """Activate disks for an instance.
+
+    The URI may contain ignore_size to ignore the currently recorded sizes.
+
+    """
+    instance_name = self.items[0]
+    ignore_size = bool(self._checkIntVariable('ignore_size'))
+
+    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
+                                         ignore_size=ignore_size)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/deactivate-disks resource.
+
+  """
+  def PUT(self):
+    """Deactivate disks for an instance.
+
+    """
+    instance_name = self.items[0]
+
+    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
 
     return baserlib.SubmitJob([op])
 
@@ -383,6 +777,7 @@ class _R_Tags(baserlib.R_Generic):
     Example: ["tag1", "tag2", "tag3"]
 
     """
+    # pylint: disable-msg=W0212
     return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
 
   def PUT(self):
@@ -392,11 +787,13 @@ class _R_Tags(baserlib.R_Generic):
     you'll have back a job id.
 
     """
+    # pylint: disable-msg=W0212
     if 'tag' not in self.queryargs:
       raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                 " the 'tag' parameter")
     return baserlib._Tags_PUT(self.TAG_LEVEL,
-                              self.queryargs['tag'], name=self.name)
+                              self.queryargs['tag'], name=self.name,
+                              dry_run=bool(self.dryRun()))
 
   def DELETE(self):
     """Delete a tag.
@@ -406,13 +803,15 @@ class _R_Tags(baserlib.R_Generic):
     /tags?tag=[tag]&tag=[tag]
 
     """
+    # pylint: disable-msg=W0212
     if 'tag' not in self.queryargs:
      # no, we are not going to delete all tags
       raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                 " tag(s) using the 'tag' parameter")
     return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                  self.queryargs['tag'],
-                                 name=self.name)
+                                 name=self.name,
+                                 dry_run=bool(self.dryRun()))
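
Tags are passed as repeated 'tag' query arguments, and both PUT and DELETE now
honour dry-run; a hedged sketch (not part of the patch, placeholder names):

import requests

RAPI = "https://cluster.example.com:5080"  # assumed endpoint
AUTH = ("rapi-user", "secret")             # assumed credentials

url = RAPI + "/2/instances/inst1.example.com/tags"

# Add two tags, but only validate the request.
requests.put(url, params=[("tag", "env:prod"), ("tag", "owner:ops"),
                          ("dry-run", 1)],
             auth=AUTH, verify=False)

# Really delete one of them.
requests.delete(url, params={"tag": "env:prod"}, auth=AUTH, verify=False)
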
 
 
 class R_2_instances_name_tags(_R_Tags):