Merge 'EvacNode' and 'NodeEvacMode'
[ganeti-local] / lib / masterd / iallocator.py
index e2e7901..e380db6 100644 (file)
@@ -58,6 +58,7 @@ _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                         ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
 
 _INST_NAME = ("name", ht.TNonEmptyString)
+_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
 
 
 class _AutoReqParam(outils.AutoSlots):
@@ -233,7 +234,7 @@ class IAReqRelocate(IARequestBase):
   # pylint: disable=E1101
   MODE = constants.IALLOCATOR_MODE_RELOC
   REQ_PARAMS = [
-    _INST_NAME,
+    _INST_UUID,
     ("relocate_from_node_uuids", _STRING_LIST),
     ]
   REQ_RESULT = ht.TList
@@ -245,10 +246,10 @@ class IAReqRelocate(IARequestBase):
     done.
 
     """
-    instance = cfg.GetInstanceInfo(self.name)
+    instance = cfg.GetInstanceInfo(self.inst_uuid)
     if instance is None:
       raise errors.ProgrammerError("Unknown instance '%s' passed to"
-                                   " IAllocator" % self.name)
+                                   " IAllocator" % self.inst_uuid)
 
     if instance.disk_template not in constants.DTS_MIRRORED:
       raise errors.OpPrereqError("Can't relocate non-mirrored instances",
@@ -263,7 +264,7 @@ class IAReqRelocate(IARequestBase):
     disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)
 
     return {
-      "name": self.name,
+      "name": instance.name,
       "disk_space_total": disk_space,
       "required_nodes": 1,
       "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
@@ -281,7 +282,7 @@ class IAReqRelocate(IARequestBase):
     fn = compat.partial(self._NodesToGroups, node2group,
                         ia.in_data["nodegroups"])
 
-    instance = ia.cfg.GetInstanceInfo(self.name)
+    instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
     request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
                         ia.cfg.GetNodeNames([instance.primary_node]))
     result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))
@@ -335,7 +336,7 @@ class IAReqNodeEvac(IARequestBase):
   MODE = constants.IALLOCATOR_MODE_NODE_EVAC
   REQ_PARAMS = [
     ("instances", _STRING_LIST),
-    ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
+    ("evac_mode", ht.TEvacMode),
     ]
   REQ_RESULT = _NEVAC_RESULT
 
@@ -398,6 +399,27 @@ class IAllocator(object):
 
     self._BuildInputData(req)
 
+  def _ComputeClusterDataNodeInfo(self, node_list, cluster_info,
+                                   hypervisor_name):
+    """Prepare and execute node info call.
+
+    @type node_list: list of strings
+    @param node_list: list of nodes' UUIDs
+    @type cluster_info: L{objects.Cluster}
+    @param cluster_info: the cluster's information from the config
+    @type hypervisor_name: string
+    @param hypervisor_name: the hypervisor name
+    @rtype: same as the result of the node info RPC call
+    @return: the result of the node info RPC call
+
+    """
+    storage_units_raw = utils.storage.GetStorageUnitsOfCluster(
+        self.cfg, include_spindles=True)
+    storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
+                                                    node_list)
+    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
+    return self.rpc.call_node_info(node_list, storage_units, hvspecs)
+
   def _ComputeClusterData(self):
     """Compute the generic allocator input data.
 
@@ -424,18 +446,16 @@ class IAllocator(object):
       hypervisor_name = self.req.hypervisor
       node_whitelist = self.req.node_whitelist
     elif isinstance(self.req, IAReqRelocate):
-      hypervisor_name = self.cfg.GetInstanceInfo(self.req.name).hypervisor
+      hypervisor_name = self.cfg.GetInstanceInfo(self.req.inst_uuid).hypervisor
       node_whitelist = None
     else:
       hypervisor_name = cluster_info.primary_hypervisor
       node_whitelist = None
 
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_list)
-    vg_req = rpc.BuildVgInfoQuery(self.cfg)
-    has_lvm = bool(vg_req)
-    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
-    node_data = self.rpc.call_node_info(node_list, vg_req,
-                                        hvspecs, es_flags)
+    has_lvm = utils.storage.IsLvmEnabled(cluster_info.enabled_disk_templates)
+    node_data = self._ComputeClusterDataNodeInfo(node_list, cluster_info,
+                                                 hypervisor_name)
+
     node_iinfo = \
       self.rpc.call_all_instances_info(node_list,
                                        cluster_info.enabled_hypervisors,
@@ -499,7 +519,103 @@ class IAllocator(object):
     return node_results
 
   @staticmethod
-  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
+  def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
+    """Extract an attribute from the hypervisor's node information.
+
+    This is a helper function to extract data from the hypervisor's information
+    about the node, as part of the result of a node_info query.
+
+    @type hv_info: dict of strings
+    @param hv_info: dictionary of node information from the hypervisor
+    @type node_name: string
+    @param node_name: name of the node
+    @type attr: string
+    @param attr: key of the attribute in the hv_info dictionary
+    @rtype: integer
+    @return: the value of the attribute
+    @raises errors.OpExecError: if key not in dictionary or value not
+      integer
+
+    """
+    if attr not in hv_info:
+      raise errors.OpExecError("Node '%s' didn't return attribute"
+                               " '%s'" % (node_name, attr))
+    value = hv_info[attr]
+    if not isinstance(value, int):
+      raise errors.OpExecError("Node '%s' returned invalid value"
+                               " for '%s': %s" %
+                               (node_name, attr, value))
+    return value
+
+  @staticmethod
+  def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
+    """Extract storage data from node info.
+
+    @type space_info: see result of the RPC call node info
+    @param space_info: the storage reporting part of the result of the RPC call
+      node info
+    @type node_name: string
+    @param node_name: the node's name
+    @type has_lvm: boolean
+    @param has_lvm: whether or not LVM storage information is requested
+    @rtype: 4-tuple of integers
+    @return: tuple of storage info (total_disk, free_disk, total_spindles,
+       free_spindles)
+
+    """
+    # TODO: replace this with proper storage reporting
+    if has_lvm:
+      lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
+         space_info, constants.ST_LVM_VG)
+      if not lvm_vg_info:
+        raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
+                                 % (node_name))
+      total_disk = lvm_vg_info["storage_size"]
+      free_disk = lvm_vg_info["storage_free"]
+      lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
+         space_info, constants.ST_LVM_PV)
+      if not lvm_pv_info:
+        raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
+                                 % (node_name))
+      total_spindles = lvm_pv_info["storage_size"]
+      free_spindles = lvm_pv_info["storage_free"]
+    else:
+      # we didn't even ask the node for VG status, so use zeros
+      total_disk = free_disk = 0
+      total_spindles = free_spindles = 0
+    return (total_disk, free_disk, total_spindles, free_spindles)
+
+  @staticmethod
+  def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
+                             input_mem_free):
+    """Compute memory used by primary instances.
+
+    @rtype: tuple (int, int, int)
+    @returns: A tuple of three integers: 1. the sum of memory used by primary
+      instances on the node (including the ones that are currently down), 2.
+      the sum of memory used by primary instances of the node that are up, 3.
+      the amount of memory that is free on the node considering the current
+      usage of the instances.
+
+    """
+    i_p_mem = i_p_up_mem = 0
+    mem_free = input_mem_free
+    for iinfo, beinfo in instance_list:
+      if iinfo.primary_node == node_uuid:
+        i_p_mem += beinfo[constants.BE_MAXMEM]
+        if iinfo.name not in node_instances_info[node_uuid].payload:
+          i_used_mem = 0
+        else:
+          i_used_mem = int(node_instances_info[node_uuid]
+                           .payload[iinfo.name]["memory"])
+        i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
+        mem_free -= max(0, i_mem_diff)
+
+        if iinfo.admin_state == constants.ADMINST_UP:
+          i_p_up_mem += beinfo[constants.BE_MAXMEM]
+    return (i_p_mem, i_p_up_mem, mem_free)
+
+  def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
                               node_results, has_lvm):
     """Compute global node data.
 
@@ -518,58 +634,32 @@ class IAllocator(object):
         nresult.Raise("Can't get data for node %s" % ninfo.name)
         node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
                                 ninfo.name)
-        remote_info = rpc.MakeLegacyNodeInfo(nresult.payload,
-                                             require_vg_info=has_lvm)
-
-        def get_attr(attr):
-          if attr not in remote_info:
-            raise errors.OpExecError("Node '%s' didn't return attribute"
-                                     " '%s'" % (ninfo.name, attr))
-          value = remote_info[attr]
-          if not isinstance(value, int):
-            raise errors.OpExecError("Node '%s' returned invalid value"
-                                     " for '%s': %s" %
-                                     (ninfo.name, attr, value))
-          return value
-
-        mem_free = get_attr("memory_free")
-
-        # compute memory used by primary instances
-        i_p_mem = i_p_up_mem = 0
-        for iinfo, beinfo in i_list:
-          if iinfo.primary_node == nuuid:
-            i_p_mem += beinfo[constants.BE_MAXMEM]
-            if iinfo.name not in node_iinfo[nuuid].payload:
-              i_used_mem = 0
-            else:
-              i_used_mem = int(node_iinfo[nuuid].payload[iinfo.name]["memory"])
-            i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
-            mem_free -= max(0, i_mem_diff)
-
-            if iinfo.admin_state == constants.ADMINST_UP:
-              i_p_up_mem += beinfo[constants.BE_MAXMEM]
-
-        # TODO: replace this with proper storage reporting
-        if has_lvm:
-          total_disk = get_attr("vg_size")
-          free_disk = get_attr("vg_free")
-          total_spindles = get_attr("spindles_total")
-          free_spindles = get_attr("spindles_free")
-        else:
-          # we didn't even ask the node for VG status, so use zeros
-          total_disk = free_disk = 0
-          total_spindles = free_spindles = 0
+        (_, space_info, (hv_info, )) = nresult.payload
+
+        mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
+                                                            "memory_free")
+
+        (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
+             i_list, node_iinfo, nuuid, mem_free)
+        (total_disk, free_disk, total_spindles, free_spindles) = \
+            self._ComputeStorageDataFromSpaceInfo(space_info, ninfo.name,
+                                                  has_lvm)
 
         # compute memory used by instances
         pnr_dyn = {
-          "total_memory": get_attr("memory_total"),
-          "reserved_memory": get_attr("memory_dom0"),
+          "total_memory": self._GetAttributeFromHypervisorNodeData(
+              hv_info, ninfo.name, "memory_total"),
+          "reserved_memory": self._GetAttributeFromHypervisorNodeData(
+              hv_info, ninfo.name, "memory_dom0"),
           "free_memory": mem_free,
           "total_disk": total_disk,
           "free_disk": free_disk,
           "total_spindles": total_spindles,
           "free_spindles": free_spindles,
-          "total_cpus": get_attr("cpu_total"),
+          "total_cpus": self._GetAttributeFromHypervisorNodeData(
+              hv_info, ninfo.name, "cpu_total"),
+          "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
+              hv_info, ninfo.name, "cpu_dom0"),
           "i_pri_memory": i_p_mem,
           "i_pri_up_memory": i_p_up_mem,
           }