# we already list instances living on such nodes, and that's
# enough warning
continue
- #TODO(dynmem): use MINMEM for checking
#TODO(dynmem): also consider ballooning out other instances
for prinode, instances in n_img.sbp.items():
needed_mem = 0
for instance in instances:
bep = cluster_info.FillBE(instance_cfg[instance])
if bep[constants.BE_AUTO_BALANCE]:
- needed_mem += bep[constants.BE_MAXMEM]
+ needed_mem += bep[constants.BE_MINMEM]
test = n_img.mfree < needed_mem
self._ErrorIf(test, constants.CV_ENODEN1, node,
"not enough memory to accomodate instance failovers"
@param requested: the amount of memory in MiB to check for
@type hypervisor_name: C{str}
@param hypervisor_name: the hypervisor to ask for memory stats
+ @rtype: integer
+ @return: the node's current free memory, in MiB
@raise errors.OpPrereqError: if the node doesn't have enough memory, or
we cannot check the node
" needed %s MiB, available %s MiB" %
(node, reason, requested, free_mem),
errors.ECODE_NORES)
+ return free_mem
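# --- Hedged sketch of the new call pattern (names here are illustrative):
# the helper keeps raising when memory is short, but now also hands the
# measured free memory back to the caller, which can keep it around as a
# ballooning target for later.
def check_node_free_memory(free_mem_mib, requested_mib, reason):
  if free_mem_mib < requested_mib:
    raise RuntimeError("Not enough memory on node (%s): needed %s MiB,"
                       " available %s MiB" %
                       (reason, requested_mib, free_mem_mib))
  return free_mem_mib

tgt_free_mem = check_node_free_memory(2048, 1024, "migrating instance")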
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
+ self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
ignore_consistency = self.op.ignore_consistency
shutdown_timeout = self.op.shutdown_timeout
self._migrater = TLMigrateInstance(self, self.op.instance_name,
del self.recalculate_locks[locking.LEVEL_NODE]
else:
self._LockInstancesNodes()
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE][:]
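# --- Standalone illustration of the lock-copy idiom above: the NODE_RES
# locks start out as a copy of the NODE locks, and the [:] slice makes that
# copy independent, so growing one list later cannot silently grow the other.
node_locks = ["node1", "node2"]
node_res_locks = node_locks[:]  # shallow, independent copy
node_locks.append("node3")
assert node_res_locks == ["node1", "node2"]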
def BuildHooksEnv(self):
"""Build hooks env.
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
- self._migrater = TLMigrateInstance(self, self.op.instance_name,
- cleanup=self.op.cleanup,
- failover=False,
- fallback=self.op.allow_failover,
- ignore_ipolicy=self.op.ignore_ipolicy)
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
+ self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
+ self._migrater = \
+ TLMigrateInstance(self, self.op.instance_name,
+ cleanup=self.op.cleanup,
+ failover=False,
+ fallback=self.op.allow_failover,
+ allow_runtime_changes=self.op.allow_runtime_changes,
+ ignore_ipolicy=self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
del self.recalculate_locks[locking.LEVEL_NODE]
else:
self._LockInstancesNodes()
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE][:]
def BuildHooksEnv(self):
"""Build hooks env.
"MIGRATE_CLEANUP": self.op.cleanup,
"OLD_PRIMARY": source_node,
"NEW_PRIMARY": target_node,
+ "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
})
if instance.disk_template in constants.DTS_INT_MIRROR:
"""
return {
"NODE_NAME": self.op.node_name,
+ "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
}
def BuildHooksNodes(self):
def Exec(self, feedback_fn):
# Prepare jobs for migration instances
+ allow_runtime_changes = self.op.allow_runtime_changes
jobs = [
[opcodes.OpInstanceMigrate(instance_name=inst.name,
mode=self.op.mode,
live=self.op.live,
iallocator=self.op.iallocator,
target_node=self.op.target_node,
+ allow_runtime_changes=allow_runtime_changes,
ignore_ipolicy=self.op.ignore_ipolicy)]
for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
]
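# --- Minimal sketch (hypothetical opcode dicts, not Ganeti's classes) of the
# fan-out above: evacuating a node submits one single-opcode job per primary
# instance, forwarding allow_runtime_changes unchanged to each migration.
def build_migrate_jobs(instance_names, allow_runtime_changes):
  return [[{"OP_ID": "OP_INSTANCE_MIGRATE",
            "instance_name": name,
            "allow_runtime_changes": allow_runtime_changes}]
          for name in instance_names]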
def __init__(self, lu, instance_name, cleanup=False,
failover=False, fallback=False,
ignore_consistency=False,
+ allow_runtime_changes=True,
shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
ignore_ipolicy=False):
"""Initializes this class.
self.ignore_consistency = ignore_consistency
self.shutdown_timeout = shutdown_timeout
self.ignore_ipolicy = ignore_ipolicy
+ self.allow_runtime_changes = allow_runtime_changes
def CheckPrereq(self):
"""Check prerequisites.
# check memory requirements on the secondary node
if (not self.cleanup and
(not self.failover or instance.admin_state == constants.ADMINST_UP)):
- _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
- instance.name, i_be[constants.BE_MAXMEM],
- instance.hypervisor)
+ self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
+ "migrating instance %s" %
+ instance.name,
+ i_be[constants.BE_MINMEM],
+ instance.hypervisor)
else:
self.lu.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
# Failover is never live
self.live = False
+ if not (self.failover or self.cleanup):
+ remote_info = self.rpc.call_instance_info(instance.primary_node,
+ instance.name,
+ instance.hypervisor)
+ remote_info.Raise("Error checking instance on node %s" %
+ instance.primary_node)
+ instance_running = bool(remote_info.payload)
+ if instance_running:
+ self.current_mem = int(remote_info.payload["memory"])
+
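# --- Sketch of the prerequisite step above with the RPC stubbed out: the
# current runtime memory is only recorded when the instance is actually
# running, because a stopped instance has nothing to balloon before moving.
def current_runtime_mem(rpc_payload):
  """rpc_payload: hypervisor info dict, or an empty value if not running."""
  if rpc_payload:
    return int(rpc_payload["memory"])
  return None

assert current_runtime_mem({"memory": 4096}) == 4096
assert current_runtime_mem({}) is None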
def _RunAllocator(self):
"""Run the allocator based on input opcode.
" synchronized on target node,"
" aborting migration" % dev.iv_name)
+ if self.current_mem > self.tgt_free_mem:
+ if not self.allow_runtime_changes:
+ raise errors.OpExecError("Memory ballooning not allowed and not enough"
+ " free memory to fit instance %s on target"
+ " node %s (have %dMB, need %dMB)" %
+ (instance.name, target_node,
+ self.tgt_free_mem, self.current_mem))
+ self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
+ rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
+ instance,
+ self.tgt_free_mem)
+ rpcres.Raise("Cannot modify instance runtime memory")
+
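# --- Self-contained sketch of the ballooning decision above (illustrative
# names): if the instance currently uses more memory than the target node
# has free, either shrink it to what fits (when runtime changes are allowed)
# or refuse the migration outright.
def balloon_target(current_mem, tgt_free_mem, allow_runtime_changes):
  if current_mem <= tgt_free_mem:
    return current_mem  # fits as-is, nothing to change
  if not allow_runtime_changes:
    raise RuntimeError("not enough free memory and ballooning disallowed")
  return tgt_free_mem  # balloon down to the target node's free memory

assert balloon_target(4096, 2048, True) == 2048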
# First get the migration information from the remote node
result = self.rpc.call_migration_info(source_node, instance)
msg = result.fail_msg
"cluster_name": cfg.GetClusterName(),
"cluster_tags": list(cluster_info.GetTags()),
"enabled_hypervisors": list(cluster_info.enabled_hypervisors),
- # we don't have job IDs
+ "ipolicy": cluster_info.ipolicy,
}
ninfo = cfg.GetAllNodesInfo()
iinfo = cfg.GetAllInstancesInfo().values()