ipolicy: Make the keys of the dict consistent
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index e1df414..81f21ae 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -734,14 +734,12 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
     if key not in constants.IPOLICY_ALL_KEYS:
       raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                  errors.ECODE_INVAL)
-    if key in constants.IPOLICY_PARAMETERS:
+    if key in constants.IPOLICY_ISPECS:
       utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
       ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=use_none,
                                        use_default=use_default)
     else:
-      # FIXME: we assume all others are lists; this should be redone
-      # in a nicer way
       if not value or value == [constants.VALUE_DEFAULT]:
         if group_policy:
           del ipolicy[key]
@@ -750,7 +748,18 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
                                      " on the cluster'" % key,
                                      errors.ECODE_INVAL)
       else:
-        ipolicy[key] = list(value)
+        if key in constants.IPOLICY_PARAMETERS:
+          # FIXME: we assume all such values are float
+          try:
+            ipolicy[key] = float(value)
+          except (TypeError, ValueError), err:
+            raise errors.OpPrereqError("Invalid value for attribute"
+                                       " '%s': '%s', error: %s" %
+                                       (key, value, err), errors.ECODE_INVAL)
+        else:
+          # FIXME: we assume all others are lists; this should be redone
+          # in a nicer way
+          ipolicy[key] = list(value)
   try:
     objects.InstancePolicy.CheckParameterSyntax(ipolicy)
   except errors.ConfigurationError, err:
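
A standalone sketch of the per-key dispatch that _GetUpdatedIPolicy now performs: keys in constants.IPOLICY_ISPECS are merged as nested spec dicts, keys in constants.IPOLICY_PARAMETERS are coerced to float, and every remaining key is still treated as a list. The constant contents and key names below are illustrative stand-ins, not Ganeti's actual definitions, and error handling is reduced to plain exceptions.

# Illustrative stand-ins for the Ganeti constants (not the real definitions).
IPOLICY_ISPECS = frozenset(["min", "max", "std"])     # dict-valued spec keys
IPOLICY_PARAMETERS = frozenset(["vcpu-ratio"])        # float-valued keys
# every other key (e.g. "disk-templates") is assumed to be list-valued

def update_ipolicy(old_ipolicy, new_ipolicy):
  ipolicy = dict(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key in IPOLICY_ISPECS:
      # nested spec dict: merge the new values over the old ones
      merged = dict(old_ipolicy.get(key, {}))
      merged.update(value)
      ipolicy[key] = merged
    elif key in IPOLICY_PARAMETERS:
      # scalar parameter: coerce to float, like the float() call above
      ipolicy[key] = float(value)
    else:
      # all other values are treated as lists
      ipolicy[key] = list(value)
  return ipolicy

print(update_ipolicy({"min": {"memory-size": 128}},
                     {"min": {"disk-size": 1024}, "vcpu-ratio": "4"}))
# -> the "min" specs are merged and "vcpu-ratio" is coerced to 4.0
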
@@ -2420,14 +2429,13 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         # we already list instances living on such nodes, and that's
         # enough warning
         continue
-      #TODO(dynmem): use MINMEM for checking
       #TODO(dynmem): also consider ballooning out other instances
       for prinode, instances in n_img.sbp.items():
         needed_mem = 0
         for instance in instances:
           bep = cluster_info.FillBE(instance_cfg[instance])
           if bep[constants.BE_AUTO_BALANCE]:
-            needed_mem += bep[constants.BE_MAXMEM]
+            needed_mem += bep[constants.BE_MINMEM]
         test = n_img.mfree < needed_mem
         self._ErrorIf(test, constants.CV_ENODEN1, node,
                       "not enough memory to accomodate instance failovers"
@@ -6398,6 +6406,8 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
   @param requested: the amount of memory in MiB to check for
   @type hypervisor_name: C{str}
   @param hypervisor_name: the hypervisor to ask for memory stats
+  @rtype: integer
+  @return: node current free memory
   @raise errors.OpPrereqError: if the node doesn't have enough memory, or
       we cannot check the node
 
@@ -6417,6 +6427,7 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
                                " needed %s MiB, available %s MiB" %
                                (node, reason, requested, free_mem),
                                errors.ECODE_NORES)
+  return free_mem
 
 
 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
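
_CheckNodeFreeMemory still raises when the node is short on memory, but now also returns the measured free memory so callers can keep it around (TLMigrateInstance stores it as tgt_free_mem further down in this patch). A reduced sketch of the new calling pattern, with a plain exception in place of errors.OpPrereqError:

def check_node_free_memory(free_mem, requested):
  """Stand-in for _CheckNodeFreeMemory: raise if short, else return free_mem."""
  if requested > free_mem:
    raise ValueError("not enough memory: needed %s MiB, available %s MiB"
                     % (requested, free_mem))
  return free_mem

tgt_free_mem = check_node_free_memory(2048, 512)
print(tgt_free_mem)  # 2048 MiB, remembered for the ballooning step below
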
@@ -7351,6 +7362,9 @@ class LUInstanceFailover(LogicalUnit):
     self.needed_locks[locking.LEVEL_NODE] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
+    self.needed_locks[locking.LEVEL_NODE_RES] = []
+    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
     ignore_consistency = self.op.ignore_consistency
     shutdown_timeout = self.op.shutdown_timeout
     self._migrater = TLMigrateInstance(self, self.op.instance_name,
@@ -7373,6 +7387,10 @@ class LUInstanceFailover(LogicalUnit):
         del self.recalculate_locks[locking.LEVEL_NODE]
       else:
         self._LockInstancesNodes()
+    elif level == locking.LEVEL_NODE_RES:
+      # Copy node locks
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.needed_locks[locking.LEVEL_NODE][:]
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -7429,11 +7447,16 @@ class LUInstanceMigrate(LogicalUnit):
     self.needed_locks[locking.LEVEL_NODE] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
-    self._migrater = TLMigrateInstance(self, self.op.instance_name,
-                                       cleanup=self.op.cleanup,
-                                       failover=False,
-                                       fallback=self.op.allow_failover,
-                                       ignore_ipolicy=self.op.ignore_ipolicy)
+    self.needed_locks[locking.LEVEL_NODE_RES] = []
+    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
+    self._migrater = \
+      TLMigrateInstance(self, self.op.instance_name,
+                        cleanup=self.op.cleanup,
+                        failover=False,
+                        fallback=self.op.allow_failover,
+                        allow_runtime_changes=self.op.allow_runtime_changes,
+                        ignore_ipolicy=self.op.ignore_ipolicy)
     self.tasklets = [self._migrater]
 
   def DeclareLocks(self, level):
@@ -7448,6 +7471,10 @@ class LUInstanceMigrate(LogicalUnit):
         del self.recalculate_locks[locking.LEVEL_NODE]
       else:
         self._LockInstancesNodes()
+    elif level == locking.LEVEL_NODE_RES:
+      # Copy node locks
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.needed_locks[locking.LEVEL_NODE][:]
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -7464,6 +7491,7 @@ class LUInstanceMigrate(LogicalUnit):
       "MIGRATE_CLEANUP": self.op.cleanup,
       "OLD_PRIMARY": source_node,
       "NEW_PRIMARY": target_node,
+      "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
       })
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
@@ -7707,6 +7735,7 @@ class LUNodeMigrate(LogicalUnit):
     """
     return {
       "NODE_NAME": self.op.node_name,
+      "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
       }
 
   def BuildHooksNodes(self):
@@ -7721,12 +7750,14 @@ class LUNodeMigrate(LogicalUnit):
 
   def Exec(self, feedback_fn):
     # Prepare jobs for migration instances
+    allow_runtime_changes = self.op.allow_runtime_changes
     jobs = [
       [opcodes.OpInstanceMigrate(instance_name=inst.name,
                                  mode=self.op.mode,
                                  live=self.op.live,
                                  iallocator=self.op.iallocator,
                                  target_node=self.op.target_node,
+                                 allow_runtime_changes=allow_runtime_changes,
                                  ignore_ipolicy=self.op.ignore_ipolicy)]
       for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
       ]
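
LUNodeMigrate.Exec keeps fanning out one single-opcode OpInstanceMigrate job per primary instance; the only change is that the new allow_runtime_changes flag is forwarded to each of them. A toy version of that fan-out, with plain dicts standing in for opcode objects and the job queue:

def build_migrate_jobs(primary_instances, allow_runtime_changes):
  # one single-opcode job per primary instance, all sharing the same flag
  return [[{"OP_ID": "OP_INSTANCE_MIGRATE",
            "instance_name": name,
            "allow_runtime_changes": allow_runtime_changes}]
          for name in primary_instances]

for job in build_migrate_jobs(["web1", "db1"], True):
  print(job)
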
@@ -7776,6 +7807,7 @@ class TLMigrateInstance(Tasklet):
   def __init__(self, lu, instance_name, cleanup=False,
                failover=False, fallback=False,
                ignore_consistency=False,
+               allow_runtime_changes=True,
                shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                ignore_ipolicy=False):
     """Initializes this class.
@@ -7792,6 +7824,7 @@ class TLMigrateInstance(Tasklet):
     self.ignore_consistency = ignore_consistency
     self.shutdown_timeout = shutdown_timeout
     self.ignore_ipolicy = ignore_ipolicy
+    self.allow_runtime_changes = allow_runtime_changes
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -7882,9 +7915,11 @@ class TLMigrateInstance(Tasklet):
     # check memory requirements on the secondary node
     if (not self.cleanup and
          (not self.failover or instance.admin_state == constants.ADMINST_UP)):
-      _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
-                           instance.name, i_be[constants.BE_MAXMEM],
-                           instance.hypervisor)
+      self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
+                                               "migrating instance %s" %
+                                               instance.name,
+                                               i_be[constants.BE_MINMEM],
+                                               instance.hypervisor)
     else:
       self.lu.LogInfo("Not checking memory on the secondary node as"
                       " instance will not be started")
@@ -7942,6 +7977,16 @@ class TLMigrateInstance(Tasklet):
       # Failover is never live
       self.live = False
 
+    if not (self.failover or self.cleanup):
+      remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                                instance.name,
+                                                instance.hypervisor)
+      remote_info.Raise("Error checking instance on node %s" %
+                        instance.primary_node)
+      instance_running = bool(remote_info.payload)
+      if instance_running:
+        self.current_mem = int(remote_info.payload["memory"])
+
   def _RunAllocator(self):
     """Run the allocator based on input opcode.
 
@@ -8187,6 +8232,19 @@ class TLMigrateInstance(Tasklet):
                                  " synchronized on target node,"
                                  " aborting migration" % dev.iv_name)
 
+    if self.current_mem > self.tgt_free_mem:
+      if not self.allow_runtime_changes:
+        raise errors.OpExecError("Memory ballooning not allowed and not enough"
+                                 " free memory to fit instance %s on target"
+                                 " node %s (have %dMB, need %dMB)" %
+                                 (instance.name, target_node,
+                                  self.tgt_free_mem, self.current_mem))
+      self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
+      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
+                                                     instance,
+                                                     self.tgt_free_mem)
+      rpcres.Raise("Cannot modify instance runtime memory")
+
     # First get the migration information from the remote node
     result = self.rpc.call_migration_info(source_node, instance)
     msg = result.fail_msg
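
Putting the pieces together: just before the migration starts, the tasklet compares the instance's current runtime memory (queried earlier via call_instance_info) with the free memory remembered from the target-node check; if the instance does not fit, it is either ballooned down via call_instance_balloon_memory or the migration is aborted when runtime changes are disallowed. A self-contained sketch of that decision, with a plain callable standing in for the RPC:

def fit_instance_on_target(current_mem, tgt_free_mem, allow_runtime_changes,
                           balloon_fn):
  """Decide whether the instance fits on the target, ballooning if allowed."""
  if current_mem <= tgt_free_mem:
    return current_mem                  # fits as-is, nothing to do
  if not allow_runtime_changes:
    raise RuntimeError("instance needs %d MiB but target only has %d MiB free"
                       % (current_mem, tgt_free_mem))
  balloon_fn(tgt_free_mem)              # shrink the runtime memory to fit
  return tgt_free_mem

print(fit_instance_on_target(2048, 1536, True, lambda mem: None))  # 1536
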
@@ -14334,7 +14392,7 @@ class IAllocator(object):
       "cluster_name": cfg.GetClusterName(),
       "cluster_tags": list(cluster_info.GetTags()),
       "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
-      # we don't have job IDs
+      "ipolicy": cluster_info.ipolicy,
       }
     ninfo = cfg.GetAllNodesInfo()
     iinfo = cfg.GetAllInstancesInfo().values()