RAPI support for networks
[ganeti-local] / lib / rapi / connector.py
index 7820654..a833fcf 100644
 
 """
 
+# pylint: disable=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
 import cgi
 import re
 
-from ganeti import constants 
+from ganeti import constants
+from ganeti import http
+from ganeti import utils
 
-from ganeti.rapi import baserlib 
-from ganeti.rapi import httperror 
-from ganeti.rapi import rlib1
 from ganeti.rapi import rlib2
 
-# the connection map created at the end of this file
+
+_NAME_PATTERN = r"[\w\._-]+"
+_DISK_PATTERN = r"\d+"
+
+# the connection map is created at the end of this file
 CONNECTOR = {}
 
 
@@ -40,102 +47,166 @@ class Mapper:
   """Map resource to method.
 
   """
-  def __init__(self, connector=CONNECTOR):
+  def __init__(self, connector=None):
     """Resource mapper constructor.
 
-    Args:
-      con: a dictionary, mapping method name with URL path regexp
+    @param connector: a dictionary mapping URL paths or regexps to handlers
 
     """
+    if connector is None:
+      connector = CONNECTOR
     self._connector = connector
 
   def getController(self, uri):
     """Find method for a given URI.
 
-    Args:
-      uri: string with URI
+    @param uri: string with URI
 
-    Returns:
-      None if no method is found or a tuple containing the following fields:
-        methd: name of method mapped to URI
-        items: a list of variable intems in the path
-        args: a dictionary with additional parameters from URL
+    @return: None if no method is found or a tuple containing
+        the following fields:
+            - method: name of method mapped to URI
+            - items: a list of variable items in the path
+            - args: a dictionary with additional parameters from URL
 
     """
-    if '?' in uri:
-      (path, query) = uri.split('?', 1)
+    if "?" in uri:
+      (path, query) = uri.split("?", 1)
       args = cgi.parse_qs(query)
     else:
       path = uri
       query = None
       args = {}
 
-    result = None
+    # Try to find handler for request path
+    result = utils.FindMatch(self._connector, path)
 
-    for key, handler in self._connector.iteritems():
-      # Regex objects
-      if hasattr(key, "match"):
-        m = key.match(path)
-        if m:
-          result = (handler, list(m.groups()), args)
-          break
+    if result is None:
+      raise http.HttpNotFound()
 
-      # String objects
-      elif key == path:
-        result = (handler, [], args)
-        break
+    (handler, groups) = result
 
-    if result is not None:
-      return result
-    else:
-      raise httperror.HTTPNotFound()
+    return (handler, groups, args)
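
For context, utils.FindMatch resolves the request path against a dictionary whose keys are either literal path strings or compiled regexes, which is exactly what the removed hand-written loop below did. A minimal standalone sketch of that lookup, under the assumption that FindMatch behaves like that loop; the names FindMatchSketch, GetControllerSketch and SAMPLE_CONNECTOR are illustrative only, not ganeti code:

import cgi
import re

# Illustrative stand-ins: in the real CONNECTOR the values are the
# rlib2.R_* handler classes registered at the end of this file.
SAMPLE_CONNECTOR = {
  "/2/nodes": "R_2_nodes",
  re.compile(r"^/2/nodes/([\w\._-]+)/tags$"): "R_2_nodes_name_tags",
  }


def FindMatchSketch(connector, path):
  """Return (handler, groups) for the first key matching path, else None."""
  for key, handler in connector.items():
    if hasattr(key, "match"):
      # Compiled regex key: captured groups become the variable path items
      m = key.match(path)
      if m:
        return (handler, list(m.groups()))
    elif key == path:
      # Plain string key: exact match, no variable items
      return (handler, [])
  return None


def GetControllerSketch(connector, uri):
  """Split off the query string, then resolve the path as getController does."""
  if "?" in uri:
    (path, query) = uri.split("?", 1)
    args = cgi.parse_qs(query)  # Python 2, matching the module above
  else:
    path = uri
    args = {}
  result = FindMatchSketch(connector, path)
  if result is None:
    # connector.py raises http.HttpNotFound here
    raise ValueError("no handler for %s" % path)
  (handler, groups) = result
  return (handler, groups, args)


print(GetControllerSketch(SAMPLE_CONNECTOR,
                          "/2/nodes/node1.example.com/tags?dry-run=1"))
# ('R_2_nodes_name_tags', ['node1.example.com'], {'dry-run': ['1']})
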
 
 
-class R_root(baserlib.R_Generic):
-  """/ resource.
+def GetHandlers(node_name_pattern, instance_name_pattern,
+                group_name_pattern, network_name_pattern,
+                job_id_pattern, disk_pattern,
+                query_res_pattern):
+  """Returns all supported resources and their handlers.
 
   """
-  DOC_URI = "/"
-
-  def GET(self):
-    """Show the list of mapped resources.
-    
-    Returns:
-      A dictionary with 'name' and 'uri' keys for each of them.
-
-    """
-    root_pattern = re.compile('^R_([a-zA-Z0-9]+)$')
-
-    rootlist = []
-    for handler in CONNECTOR.values():
-      m = root_pattern.match(handler.__name__)
-      if m:
-        name = m.group(1)
-        if name != 'root':
-          rootlist.append(name)
-
-    return baserlib.BuildUriList(rootlist, "/%s")
-
-
-CONNECTOR.update({
-  "/": R_root,
-
-  "/version": rlib1.R_version,
-
-  "/tags": rlib1.R_tags,
-  "/info": rlib1.R_info,
-
-  "/nodes": rlib1.R_nodes,
-  re.compile(r'^/nodes/([\w\._-]+)$'): rlib1.R_nodes_name,
-  re.compile(r'^/nodes/([\w\._-]+)/tags$'): rlib1.R_nodes_name_tags,
-
-  "/instances": rlib1.R_instances,
-  re.compile(r'^/instances/([\w\._-]+)$'): rlib1.R_instances_name,
-  re.compile(r'^/instances/([\w\._-]+)/tags$'): rlib1.R_instances_name_tags,
-
-  "/os": rlib1.R_os,
-
-  "/2/jobs": rlib2.R_2_jobs,
-  "/2/nodes": rlib2.R_2_nodes,
-  re.compile(r'/2/jobs/(%s)$' % constants.JOB_ID_TEMPLATE): rlib2.R_2_jobs_id,
-  })
+  # Important note: New resources should always be added under /2. During a
+  # discussion in July 2010 it was decided that having per-resource versions
+  # is more flexible and future-compatible than versioning the whole remote
+  # API.
+  return {
+    "/": rlib2.R_root,
+    "/2": rlib2.R_2,
+
+    "/version": rlib2.R_version,
+
+    "/2/nodes": rlib2.R_2_nodes,
+    re.compile(r"^/2/nodes/(%s)$" % node_name_pattern):
+      rlib2.R_2_nodes_name,
+    re.compile(r"^/2/nodes/(%s)/powercycle$" % node_name_pattern):
+      rlib2.R_2_nodes_name_powercycle,
+    re.compile(r"^/2/nodes/(%s)/tags$" % node_name_pattern):
+      rlib2.R_2_nodes_name_tags,
+    re.compile(r"^/2/nodes/(%s)/role$" % node_name_pattern):
+      rlib2.R_2_nodes_name_role,
+    re.compile(r"^/2/nodes/(%s)/evacuate$" % node_name_pattern):
+      rlib2.R_2_nodes_name_evacuate,
+    re.compile(r"^/2/nodes/(%s)/migrate$" % node_name_pattern):
+      rlib2.R_2_nodes_name_migrate,
+    re.compile(r"^/2/nodes/(%s)/modify$" % node_name_pattern):
+      rlib2.R_2_nodes_name_modify,
+    re.compile(r"^/2/nodes/(%s)/storage$" % node_name_pattern):
+      rlib2.R_2_nodes_name_storage,
+    re.compile(r"^/2/nodes/(%s)/storage/modify$" % node_name_pattern):
+      rlib2.R_2_nodes_name_storage_modify,
+    re.compile(r"^/2/nodes/(%s)/storage/repair$" % node_name_pattern):
+      rlib2.R_2_nodes_name_storage_repair,
+
+    "/2/instances": rlib2.R_2_instances,
+    re.compile(r"^/2/instances/(%s)$" % instance_name_pattern):
+      rlib2.R_2_instances_name,
+    re.compile(r"^/2/instances/(%s)/info$" % instance_name_pattern):
+      rlib2.R_2_instances_name_info,
+    re.compile(r"^/2/instances/(%s)/tags$" % instance_name_pattern):
+      rlib2.R_2_instances_name_tags,
+    re.compile(r"^/2/instances/(%s)/reboot$" % instance_name_pattern):
+      rlib2.R_2_instances_name_reboot,
+    re.compile(r"^/2/instances/(%s)/reinstall$" % instance_name_pattern):
+      rlib2.R_2_instances_name_reinstall,
+    re.compile(r"^/2/instances/(%s)/replace-disks$" % instance_name_pattern):
+      rlib2.R_2_instances_name_replace_disks,
+    re.compile(r"^/2/instances/(%s)/shutdown$" % instance_name_pattern):
+      rlib2.R_2_instances_name_shutdown,
+    re.compile(r"^/2/instances/(%s)/startup$" % instance_name_pattern):
+      rlib2.R_2_instances_name_startup,
+    re.compile(r"^/2/instances/(%s)/activate-disks$" % instance_name_pattern):
+      rlib2.R_2_instances_name_activate_disks,
+    re.compile(r"^/2/instances/(%s)/deactivate-disks$" % instance_name_pattern):
+      rlib2.R_2_instances_name_deactivate_disks,
+    re.compile(r"^/2/instances/(%s)/recreate-disks$" % instance_name_pattern):
+      rlib2.R_2_instances_name_recreate_disks,
+    re.compile(r"^/2/instances/(%s)/prepare-export$" % instance_name_pattern):
+      rlib2.R_2_instances_name_prepare_export,
+    re.compile(r"^/2/instances/(%s)/export$" % instance_name_pattern):
+      rlib2.R_2_instances_name_export,
+    re.compile(r"^/2/instances/(%s)/migrate$" % instance_name_pattern):
+      rlib2.R_2_instances_name_migrate,
+    re.compile(r"^/2/instances/(%s)/failover$" % instance_name_pattern):
+      rlib2.R_2_instances_name_failover,
+    re.compile(r"^/2/instances/(%s)/rename$" % instance_name_pattern):
+      rlib2.R_2_instances_name_rename,
+    re.compile(r"^/2/instances/(%s)/modify$" % instance_name_pattern):
+      rlib2.R_2_instances_name_modify,
+    re.compile(r"^/2/instances/(%s)/disk/(%s)/grow$" %
+               (instance_name_pattern, disk_pattern)):
+      rlib2.R_2_instances_name_disk_grow,
+    re.compile(r"^/2/instances/(%s)/console$" % instance_name_pattern):
+      rlib2.R_2_instances_name_console,
+
+    "/2/networks": rlib2.R_2_networks,
+    re.compile(r"^/2/networks/(%s)$" % network_name_pattern):
+      rlib2.R_2_networks_name,
+    re.compile(r"^/2/networks/(%s)/connect$" % network_name_pattern):
+      rlib2.R_2_networks_name_connect,
+    re.compile(r"^/2/networks/(%s)/disconnect$" % network_name_pattern):
+      rlib2.R_2_networks_name_disconnect,
+
+    "/2/groups": rlib2.R_2_groups,
+    re.compile(r"^/2/groups/(%s)$" % group_name_pattern):
+      rlib2.R_2_groups_name,
+    re.compile(r"^/2/groups/(%s)/modify$" % group_name_pattern):
+      rlib2.R_2_groups_name_modify,
+    re.compile(r"^/2/groups/(%s)/rename$" % group_name_pattern):
+      rlib2.R_2_groups_name_rename,
+    re.compile(r"^/2/groups/(%s)/assign-nodes$" % group_name_pattern):
+      rlib2.R_2_groups_name_assign_nodes,
+    re.compile(r"^/2/groups/(%s)/tags$" % group_name_pattern):
+      rlib2.R_2_groups_name_tags,
+
+    "/2/jobs": rlib2.R_2_jobs,
+    re.compile(r"^/2/jobs/(%s)$" % job_id_pattern):
+      rlib2.R_2_jobs_id,
+    re.compile(r"^/2/jobs/(%s)/wait$" % job_id_pattern):
+      rlib2.R_2_jobs_id_wait,
+
+    "/2/tags": rlib2.R_2_tags,
+    "/2/info": rlib2.R_2_info,
+    "/2/os": rlib2.R_2_os,
+    "/2/redistribute-config": rlib2.R_2_redist_config,
+    "/2/features": rlib2.R_2_features,
+    "/2/modify": rlib2.R_2_cluster_modify,
+    re.compile(r"^/2/query/(%s)$" % query_res_pattern): rlib2.R_2_query,
+    re.compile(r"^/2/query/(%s)/fields$" % query_res_pattern):
+      rlib2.R_2_query_fields,
+    }
+
+
+CONNECTOR.update(GetHandlers(_NAME_PATTERN, _NAME_PATTERN,
+                             _NAME_PATTERN, _NAME_PATTERN,
+                             constants.JOB_ID_TEMPLATE, _DISK_PATTERN,
+                             _NAME_PATTERN))
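
With this patch applied, the new /2/networks resources resolve through the same Mapper/CONNECTOR machinery as every other route. A quick sanity-check sketch, assuming a ganeti tree containing this change is importable; the handler classes R_2_networks and R_2_networks_name_connect are the ones registered in the table above:

from ganeti.rapi import connector, rlib2

mapper = connector.Mapper()

# Static path added by this patch; string keys yield no variable items.
(handler, items, args) = mapper.getController("/2/networks")
assert handler is rlib2.R_2_networks
assert items == [] and args == {}

# Regex path: the network name is captured as the single variable item,
# and cgi.parse_qs returns query values as lists.
(handler, items, args) = mapper.getController(
  "/2/networks/net1.example.com/connect?dry-run=1")
assert handler is rlib2.R_2_networks_name_connect
assert items == ["net1.example.com"]
assert args == {"dry-run": ["1"]}
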