Revision a295eb80
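This revision threads hypervisor parameters through the node and instance information paths: call sites that previously passed a bare hypervisor name (or a list of names) to call_node_info, call_instance_info, CheckNodeFreeMemory and _CheckNodesPhysicalCPUs now pass the name together with its hvparams dict, looked up in the cluster configuration.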
b/lib/cmdlib/instance.py

@@ -1158,10 +1158,12 @@
     # memory check on primary node
     #TODO(dynmem): use MINMEM for checking
     if self.op.start:
+      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
+                                self.op.hvparams)
       CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MAXMEM],
-                          self.op.hypervisor)
+                          self.op.hypervisor, hvfull)
 
     self.dry_run_result = list(nodenames)
 
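The hvfull dict above merges the cluster's defaults for the requested hypervisor with the per-opcode overrides, so the memory check sees the effective parameters the instance will run with. A minimal sketch of the merge, with hypothetical parameter values and assuming FillDict's usual defaults-then-overrides semantics:

# Hypothetical values; assumes objects.FillDict returns a copy of the
# defaults dict updated with the override dict.
defaults = {"kernel_path": "/boot/vmlinuz", "serial_console": True}
overrides = {"serial_console": False}
merged = objects.FillDict(defaults, overrides)
assert merged == {"kernel_path": "/boot/vmlinuz", "serial_console": False}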
@@ -1692,10 +1694,10 @@
 
     if instance.admin_state == constants.ADMINST_UP:
       # check memory requirements on the secondary node
-      CheckNodeFreeMemory(self, target_node,
-                          "failing over instance %s" %
-                          instance.name, bep[constants.BE_MAXMEM],
-                          instance.hypervisor)
+      CheckNodeFreeMemory(
+          self, target_node, "failing over instance %s" %
+          instance.name, bep[constants.BE_MAXMEM], instance.hypervisor,
+          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
     else:
       self.LogInfo("Not checking memory on the secondary node as"
                    " instance will not be started")
@@ -1980,7 +1982,7 @@
     return [(op, idx, params, fn()) for (op, idx, params) in mods]
 
 
-def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
+def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_specs):
   """Checks if nodes have enough physical CPUs
 
   This function checks if all given nodes have the needed number of

@@ -1994,11 +1996,14 @@
   @param nodenames: the list of node names to check
   @type requested: C{int}
   @param requested: the minimum acceptable number of physical CPUs
+  @type hypervisor_specs: list of pairs (string, dict of strings)
+  @param hypervisor_specs: list of hypervisor specifications in
+      pairs (hypervisor_name, hvparams)
   @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
+  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_specs, None)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
@@ -2793,8 +2798,11 @@
     max_requested_cpu = max(map(max, cpu_list))
     # Check that all of the instance's nodes have enough physical CPUs to
     # satisfy the requested CPU mask
+    hvspecs = [(instance.hypervisor,
+                self.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
     _CheckNodesPhysicalCPUs(self, instance.all_nodes,
-                            max_requested_cpu + 1, instance.hypervisor)
+                            max_requested_cpu + 1,
+                            hvspecs)
 
     # osparams processing
     if self.op.osparams:
@@ -2811,10 +2819,12 @@
     if be_new[constants.BE_AUTO_BALANCE]:
       # either we changed auto_balance to yes or it was from before
       mem_check_list.extend(instance.secondary_nodes)
-    instance_info = self.rpc.call_instance_info(pnode, instance.name,
-                                                instance.hypervisor)
+    instance_info = self.rpc.call_instance_info(
+        pnode, instance.name, instance.hypervisor,
+        cluster.hvparams[instance.hypervisor])
+    hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
     nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                       [instance.hypervisor], False)
+                                       hvspecs, False)
     pninfo = nodeinfo[pnode]
     msg = pninfo.fail_msg
     if msg:
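Note that call_instance_info gains the same extra hvparams argument here, and the hvspecs pair list built in this hunk is reused for the call_node_info query that follows it.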
@@ -2888,9 +2898,10 @@
 
     delta = self.op.runtime_mem - current_memory
     if delta > 0:
-      CheckNodeFreeMemory(self, instance.primary_node,
-                          "ballooning memory for instance %s" %
-                          instance.name, delta, instance.hypervisor)
+      CheckNodeFreeMemory(
+          self, instance.primary_node, "ballooning memory for instance %s" %
+          instance.name, delta, instance.hypervisor,
+          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
 
   def _PrepareNicCreate(_, params, private):
     self._PrepareNicModification(params, private, None, None,
b/lib/cmdlib/instance_migration.py

@@ -370,11 +370,10 @@
     # check memory requirements on the secondary node
     if (not self.cleanup and
         (not self.failover or instance.admin_state == constants.ADMINST_UP)):
-      self.tgt_free_mem = CheckNodeFreeMemory(self.lu, target_node,
-                                              "migrating instance %s" %
-                                              instance.name,
-                                              i_be[constants.BE_MINMEM],
-                                              instance.hypervisor)
+      self.tgt_free_mem = CheckNodeFreeMemory(
+          self.lu, target_node, "migrating instance %s" % instance.name,
+          i_be[constants.BE_MINMEM], instance.hypervisor,
+          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
     else:
       self.lu.LogInfo("Not checking memory on the secondary node as"
                       " instance will not be started")
@@ -654,8 +653,10 @@
     source_node = self.source_node
 
     # Check for hypervisor version mismatch and warn the user.
+    hvspecs = [(instance.hypervisor,
+                self.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
     nodeinfo = self.rpc.call_node_info([source_node, target_node],
-                                       None, [self.instance.hypervisor], False)
+                                       None, hvspecs, False)
     for ninfo in nodeinfo.values():
       ninfo.Raise("Unable to retrieve node information from node '%s'" %
                   ninfo.node)
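For migration, the parameters travel both with the free-memory check on the target node and with the node-info query used to warn about a hypervisor version mismatch between source and target.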
b/lib/cmdlib/instance_operation.py

@@ -133,9 +133,10 @@
     remote_info.Raise("Error checking node %s" % instance.primary_node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
     if not remote_info.payload: # not running already
-      CheckNodeFreeMemory(self, instance.primary_node,
-                          "starting instance %s" % instance.name,
-                          bep[constants.BE_MINMEM], instance.hypervisor)
+      CheckNodeFreeMemory(
+          self, instance.primary_node, "starting instance %s" % instance.name,
+          bep[constants.BE_MINMEM], instance.hypervisor,
+          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
 
   def Exec(self, feedback_fn):
     """Start the instance.
b/lib/cmdlib/instance_utils.py

@@ -450,7 +450,7 @@
   return "originstname+%s" % instance.name
 
 
-def CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
+def CheckNodeFreeMemory(lu, node, reason, requested, hvname, hvparams):
   """Checks if a node has enough free memory.
 
   This function checks if a given node has the needed amount of free

@@ -466,15 +466,17 @@
   @param reason: string to use in the error message
   @type requested: C{int}
   @param requested: the amount of memory in MiB to check for
-  @type hypervisor_name: C{str}
-  @param hypervisor_name: the hypervisor to ask for memory stats
+  @type hvname: string
+  @param hvname: the hypervisor's name
+  @type hvparams: dict of strings
+  @param hvparams: the hypervisor's parameters
   @rtype: integer
   @return: node current free memory
   @raise errors.OpPrereqError: if the node doesn't have enough memory, or
       we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
+  nodeinfo = lu.rpc.call_node_info([node], None, [(hvname, hvparams)], False)
   nodeinfo[node].Raise("Can't get data from node %s" % node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
   (_, _, (hv_info, )) = nodeinfo[node].payload
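Putting the new signature together, a caller now supplies both the hypervisor's name and its parameters. A minimal sketch mirroring the updated call sites in this revision; lu, node, instance, bep and constants are assumed from the caller's scope:

# Resolve the cluster-level hvparams for the instance's hypervisor,
# then pass both name and params to the memory check.
hvname = instance.hypervisor
hvparams = lu.cfg.GetClusterInfo().hvparams[hvname]
free_mem = CheckNodeFreeMemory(lu, node,
                               "starting instance %s" % instance.name,
                               bep[constants.BE_MINMEM], hvname, hvparams)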
b/lib/cmdlib/node.py

@@ -1176,8 +1176,11 @@
     # FIXME: This currently maps everything to lvm, this should be more
     # flexible
     vg_req = rpc.BuildVgInfoQuery(lu.cfg)
+    default_hypervisor = lu.cfg.GetHypervisorType()
+    hvparams = lu.cfg.GetClusterInfo().hvparams[default_hypervisor]
+    hvspecs = [(default_hypervisor, hvparams)]
     node_data = lu.rpc.call_node_info(toquery_nodes, vg_req,
-                                      [lu.cfg.GetHypervisorType()], es_flags)
+                                      hvspecs, es_flags)
     live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
                      for (name, nresult) in node_data.items()
                      if not nresult.fail_msg and nresult.payload)
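With no instance in scope here, the node query falls back to the cluster's default hypervisor (lu.cfg.GetHypervisorType()) and that hypervisor's cluster-level parameters.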
b/lib/masterd/iallocator.py

@@ -433,8 +433,10 @@
     es_flags = rpc.GetExclusiveStorageForNodeNames(cfg, node_list)
     vg_req = rpc.BuildVgInfoQuery(cfg)
     has_lvm = bool(vg_req)
+    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
     node_data = self.rpc.call_node_info(node_list, vg_req,
-                                        [hypervisor_name], es_flags)
+                                        hvspecs, es_flags)
     node_iinfo = \
       self.rpc.call_all_instances_info(node_list,
                                        cluster_info.enabled_hypervisors)
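In every file touched here, the third argument of call_node_info thus changes shape from a list of hypervisor names, e.g. ["kvm"], to a list of (name, hvparams) pairs, e.g. [("kvm", {...})].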