cmdlib._CheckNodeFreeMemory: return free memory
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 1b9ac0e..8c46782 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -734,14 +734,12 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
     if key not in constants.IPOLICY_ALL_KEYS:
       raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                  errors.ECODE_INVAL)
-    if key in constants.IPOLICY_PARAMETERS:
+    if key in constants.IPOLICY_ISPECS:
       utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
       ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=use_none,
                                        use_default=use_default)
     else:
-      # FIXME: we assume all others are lists; this should be redone
-      # in a nicer way
       if not value or value == [constants.VALUE_DEFAULT]:
         if group_policy:
           del ipolicy[key]
@@ -750,7 +748,18 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
                                      " on the cluster'" % key,
                                      errors.ECODE_INVAL)
       else:
-        ipolicy[key] = list(value)
+        if key in constants.IPOLICY_PARAMETERS:
+          # FIXME: we assume all such values are float
+          try:
+            ipolicy[key] = float(value)
+          except (TypeError, ValueError), err:
+            raise errors.OpPrereqError("Invalid value for attribute"
+                                       " '%s': '%s', error: %s" %
+                                       (key, value, err), errors.ECODE_INVAL)
+        else:
+          # FIXME: we assume all others are lists; this should be redone
+          # in a nicer way
+          ipolicy[key] = list(value)
   try:
     objects.InstancePolicy.CheckParameterSyntax(ipolicy)
   except errors.ConfigurationError, err:
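For readers skimming the hunk above: the rewritten branch now distinguishes three kinds of ipolicy values. The following is a minimal standalone sketch of that dispatch, not Ganeti code; the constant names mirror lib/constants.py, but the member values here are simplified stand-ins.

IPOLICY_ISPECS = frozenset(["min", "max", "std"])   # nested spec dicts
IPOLICY_PARAMETERS = frozenset(["vcpu-ratio"])      # float-valued parameters
# any other key (e.g. "disk-templates") keeps its list semantics

def normalise_value(key, value):
  if key in IPOLICY_ISPECS:
    return dict(value)    # merged into the existing spec dict by the real code
  elif key in IPOLICY_PARAMETERS:
    return float(value)   # a bad value raises ValueError (-> OpPrereqError)
  else:
    return list(value)

print(normalise_value("vcpu-ratio", "4.0"))                  # 4.0
print(normalise_value("disk-templates", ("plain", "drbd")))  # ['plain', 'drbd']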
@@ -1196,6 +1205,19 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
+def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
+  """Computes a set of any instances that would violate the new ipolicy.
+
+  @param old_ipolicy: The current (still in-place) ipolicy
+  @param new_ipolicy: The new (to become) ipolicy
+  @param instances: List of instances to verify
+  @return: Instance names that violate the new ipolicy but not the old one
+
+  """
+  return (_ComputeViolatingInstances(new_ipolicy, instances) -
+          _ComputeViolatingInstances(old_ipolicy, instances))
+
+
 def _ExpandItemName(fn, name, kind):
   """Expand an item name.
 
@@ -1416,6 +1438,19 @@ def _CalculateGroupIPolicy(cluster, group):
   return cluster.SimpleFillIPolicy(group.ipolicy)
 
 
+def _ComputeViolatingInstances(ipolicy, instances):
+  """Computes a set of instances who violates given ipolicy.
+
+  @param ipolicy: The ipolicy to verify
+  @type instances: list of L{objects.Instance}
+  @param instances: List of instances to verify
+  @return: A frozenset of instance names violating the ipolicy
+
+  """
+  return frozenset([inst.name for inst in instances
+                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
+
+
 def _CheckNicsBridgesExist(lu, target_nics, target_node):
   """Check that the brigdes needed by a list of nics exist.
 
@@ -3763,8 +3798,14 @@ class LUClusterSetParams(LogicalUnit):
     # all nodes to be modified.
     self.needed_locks = {
       locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
+    }
+    self.share_locks = {
+        locking.LEVEL_NODE: 1,
+        locking.LEVEL_INSTANCE: 1,
+        locking.LEVEL_NODEGROUP: 1,
     }
-    self.share_locks[locking.LEVEL_NODE] = 1
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -3871,6 +3912,24 @@ class LUClusterSetParams(LogicalUnit):
       self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
                                             group_policy=False)
 
+      all_instances = self.cfg.GetAllInstancesInfo().values()
+      violations = set()
+      for group in self.cfg.GetAllNodeGroupsInfo().values():
+        instances = frozenset([inst for inst in all_instances
+                               if compat.any(node in group.members
+                                             for node in inst.all_nodes)])
+        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
+        new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
+                                                                   group),
+                                            new_ipolicy, instances)
+        if new:
+          violations.update(new)
+
+      if violations:
+        self.LogWarning("After the ipolicy change the following instances"
+                        " violate the new policy: %s",
+                        utils.CommaJoin(violations))
+
     if self.op.nicparams:
       utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
       self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
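The warning above is driven by the two helpers added earlier (_ComputeViolatingInstances and _ComputeNewInstanceViolations). A self-contained sketch of their set arithmetic, using made-up instance data and a trivial size check in place of _ComputeIPolicyInstanceViolation:

def violating(ipolicy, instances):
  # stand-in for _ComputeViolatingInstances: names of instances whose disk
  # size exceeds the policy maximum
  return frozenset(name for (name, size) in instances
                   if size > ipolicy["max-disk"])

instances = [("inst1", 100), ("inst2", 900), ("inst3", 300)]
old_ipolicy = {"max-disk": 1000}   # nothing violates the current policy
new_ipolicy = {"max-disk": 500}    # inst2 would start violating

new_violations = (violating(new_ipolicy, instances) -
                  violating(old_ipolicy, instances))
print(sorted(new_violations))      # ['inst2']

In LUClusterSetParams the check runs once per node group with the group's filled policy, restricted to instances that have at least one node in that group, so the warning lists every instance the new policy would newly flag.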
@@ -6348,6 +6407,8 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
   @param requested: the amount of memory in MiB to check for
   @type hypervisor_name: C{str}
   @param hypervisor_name: the hypervisor to ask for memory stats
+  @rtype: integer
+  @return: the node's current free memory, in MiB
   @raise errors.OpPrereqError: if the node doesn't have enough memory, or
       we cannot check the node
 
@@ -6367,6 +6428,7 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
                                " needed %s MiB, available %s MiB" %
                                (node, reason, requested, free_mem),
                                errors.ECODE_NORES)
+  return free_mem
 
 
 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
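The only behavioural change in this hunk is the added return value. A standalone sketch of the resulting check-and-return pattern, with a plain function standing in for the LU/RPC machinery:

class OpPrereqError(Exception):
  pass

def check_node_free_memory(free_mem, requested):
  """Fail if there is not enough free memory, else return the free amount."""
  if requested > free_mem:
    raise OpPrereqError("needed %s MiB, available %s MiB" %
                        (requested, free_mem))
  return free_mem

# callers can now both enforce the check and reuse the verified value
remaining = check_node_free_memory(2048, 512) - 512
print(remaining)   # 1536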
@@ -6851,9 +6913,39 @@ class LUInstanceRecreateDisks(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   REQ_BGL = False
 
+  _MODIFYABLE = frozenset([
+    constants.IDISK_SIZE,
+    constants.IDISK_MODE,
+    ])
+
+  # New or changed disk parameters may have different semantics
+  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
+    constants.IDISK_ADOPT,
+
+    # TODO: Implement support changing VG while recreating
+    constants.IDISK_VG,
+    constants.IDISK_METAVG,
+    ]))
+
   def CheckArguments(self):
-    # normalise the disk list
-    self.op.disks = sorted(frozenset(self.op.disks))
+    if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
+      # Normalize and convert deprecated list of disk indices
+      self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
+
+    duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
+    if duplicates:
+      raise errors.OpPrereqError("Some disks have been specified more than"
+                                 " once: %s" % utils.CommaJoin(duplicates),
+                                 errors.ECODE_INVAL)
+
+    for (idx, params) in self.op.disks:
+      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
+      unsupported = frozenset(params.keys()) - self._MODIFYABLE
+      if unsupported:
+        raise errors.OpPrereqError("Parameters for disk %s try to change"
+                                   " unmodifiable parameter(s): %s" %
+                                   (idx, utils.CommaJoin(unsupported)),
+                                   errors.ECODE_INVAL)
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
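After this change the opcode's disks argument accepts two shapes. A short illustration with made-up values; "size" and "mode" are the only keys listed in _MODIFYABLE above:

# deprecated format: a plain list of disk indices, normalised to (idx, {}) pairs
old_style = [2, 0, 2]
converted = [(idx, {}) for idx in sorted(frozenset(old_style))]
print(converted)                  # [(0, {}), (2, {})]

# new format: (index, parameters) pairs; only size and mode may be changed here
new_style = [(0, {"size": 2048}), (1, {"mode": "ro"})]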
@@ -6919,6 +7011,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     if instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Instance '%s' has no disks" %
                                  self.op.instance_name, errors.ECODE_INVAL)
+
     # if we replace nodes *and* the old primary is offline, we don't
     # check
     assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
@@ -6928,17 +7021,22 @@ class LUInstanceRecreateDisks(LogicalUnit):
       _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                           msg="cannot recreate disks")
 
-    if not self.op.disks:
-      self.op.disks = range(len(instance.disks))
+    if self.op.disks:
+      self.disks = dict(self.op.disks)
     else:
-      for idx in self.op.disks:
-        if idx >= len(instance.disks):
-          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
-                                     errors.ECODE_INVAL)
-    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
+      self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
+
+    maxidx = max(self.disks.keys())
+    if maxidx >= len(instance.disks):
+      raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
+                                 errors.ECODE_INVAL)
+
+    if (self.op.nodes and
+        sorted(self.disks.keys()) != range(len(instance.disks))):
       raise errors.OpPrereqError("Can't recreate disks partially and"
                                  " change the nodes at the same time",
                                  errors.ECODE_INVAL)
+
     self.instance = instance
 
   def Exec(self, feedback_fn):
@@ -6951,30 +7049,42 @@ class LUInstanceRecreateDisks(LogicalUnit):
             self.owned_locks(locking.LEVEL_NODE_RES))
 
     to_skip = []
-    mods = [] # keeps track of needed logical_id changes
+    mods = [] # keeps track of needed changes
 
     for idx, disk in enumerate(instance.disks):
-      if idx not in self.op.disks: # disk idx has not been passed in
+      try:
+        changes = self.disks[idx]
+      except KeyError:
+        # Disk should not be recreated
         to_skip.append(idx)
         continue
+
       # update secondaries for disks, if needed
-      if self.op.nodes:
-        if disk.dev_type == constants.LD_DRBD8:
-          # need to update the nodes and minors
-          assert len(self.op.nodes) == 2
-          assert len(disk.logical_id) == 6 # otherwise disk internals
-                                           # have changed
-          (_, _, old_port, _, _, old_secret) = disk.logical_id
-          new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
-          new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
-                    new_minors[0], new_minors[1], old_secret)
-          assert len(disk.logical_id) == len(new_id)
-          mods.append((idx, new_id))
+      if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
+        # need to update the nodes and minors
+        assert len(self.op.nodes) == 2
+        assert len(disk.logical_id) == 6 # otherwise disk internals
+                                         # have changed
+        (_, _, old_port, _, _, old_secret) = disk.logical_id
+        new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
+        new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
+                  new_minors[0], new_minors[1], old_secret)
+        assert len(disk.logical_id) == len(new_id)
+      else:
+        new_id = None
+
+      mods.append((idx, new_id, changes))
 
     # now that we have passed all asserts above, we can apply the mods
     # in a single run (to avoid partial changes)
-    for idx, new_id in mods:
-      instance.disks[idx].logical_id = new_id
+    for idx, new_id, changes in mods:
+      disk = instance.disks[idx]
+      if new_id is not None:
+        assert disk.dev_type == constants.LD_DRBD8
+        disk.logical_id = new_id
+      if changes:
+        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
+                    mode=changes.get(constants.IDISK_MODE, None))
 
     # change primary node, if needed
     if self.op.nodes:
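Exec now collects every change first and applies the whole list afterwards, so a failed assertion cannot leave the disks half-updated. A rough standalone sketch of this two-phase pattern, using plain dicts instead of objects.Disk:

disks = [{"size": 1024, "mode": "rw"}, {"size": 2048, "mode": "rw"}]
requested = {1: {"size": 4096}}           # recreate disk 1 with a new size

mods = []
for idx in range(len(disks)):
  if idx in requested:
    mods.append((idx, requested[idx]))    # phase 1: validate and record only

for idx, changes in mods:
  disks[idx].update(changes)              # phase 2: apply everything at once
print(disks)   # the second disk now has size 4096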
@@ -13411,14 +13521,32 @@ class LUGroupSetParams(LogicalUnit):
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
     self.needed_locks = {
+      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
 
+    self.share_locks[locking.LEVEL_INSTANCE] = 1
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_INSTANCE:
+      assert not self.needed_locks[locking.LEVEL_INSTANCE]
+
+      # Lock instances optimistically, needs verification once group lock has
+      # been acquired
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+          self.cfg.GetNodeGroupInstances(self.group_uuid)
+
   def CheckPrereq(self):
     """Check prerequisites.
 
     """
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+
+    # Check if locked instances are still correct
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
+    cluster = self.cfg.GetClusterInfo()
 
     if self.group is None:
       raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
@@ -13453,6 +13581,19 @@ class LUGroupSetParams(LogicalUnit):
                                             self.op.ipolicy,
                                             group_policy=True)
 
+      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
+      inst_filter = lambda inst: inst.name in owned_instances
+      instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
+      violations = \
+          _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
+                                                               self.group),
+                                        new_ipolicy, instances)
+
+      if violations:
+        self.LogWarning("After the ipolicy change the following instances"
+                        " violate the new policy: %s",
+                        utils.CommaJoin(violations))
+
   def BuildHooksEnv(self):
     """Build hooks env.
 
@@ -14205,7 +14346,7 @@ class IAllocator(object):
       "cluster_name": cfg.GetClusterName(),
       "cluster_tags": list(cluster_info.GetTags()),
       "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
-      # we don't have job IDs
+      "ipolicy": cluster_info.ipolicy,
       }
     ninfo = cfg.GetAllNodesInfo()
     iinfo = cfg.GetAllInstancesInfo().values()