@@ -573,6 +573,20 @@
   return dict.fromkeys(locking.LEVELS, 1)
 
 
+def _MakeLegacyNodeInfo(data):
+  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
+
+  Converts the data into a single dictionary. This is fine for most use cases,
+  but some require information from more than one volume group or hypervisor.
+
+  """
+  (bootid, (vg_info, ), (hv_info, )) = data
+
+  return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
+    "bootid": bootid,
+    })
+
+
 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
   """Checks if the owned node groups are still correct for an instance.
 
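With this change call_node_info returns, per node, a (bootid, vg-info-list, hv-info-list) triple instead of a single flat dict, and _MakeLegacyNodeInfo squashes that triple back into the old shape for call sites that still expect it. A minimal standalone sketch of the conversion, with illustrative keys and values and a local stand-in for utils.JoinDisjointDicts so the snippet runs outside Ganeti:

def join_disjoint_dicts(d1, d2):
  # Stand-in for utils.JoinDisjointDicts: merge two dicts that share no keys.
  assert not (set(d1) & set(d2))
  merged = d1.copy()
  merged.update(d2)
  return merged

# New-style payload: (bootid, one dict per requested VG, one dict per hypervisor)
payload = ("fake-boot-id",
           ({"vg_size": 409600, "vg_free": 102400}, ),
           ({"memory_total": 16384, "memory_free": 8192, "cpu_total": 8}, ))

(bootid, (vg_info, ), (hv_info, )) = payload
legacy = join_disjoint_dicts(join_disjoint_dicts(vg_info, hv_info),
                             {"bootid": bootid})

# legacy is now the flat dict older callers expect, e.g.
# legacy["memory_free"] == 8192 and legacy["bootid"] == "fake-boot-id"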
@@ -4591,9 +4605,9 @@
       # filter out non-vm_capable nodes
       toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
 
-      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
-                                        lu.cfg.GetHypervisorType())
-      live_data = dict((name, nresult.payload)
+      node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
+                                        [lu.cfg.GetHypervisorType()])
+      live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
                        for (name, nresult) in node_data.items()
                        if not nresult.fail_msg and nresult.payload)
     else:
@@ -6012,10 +6026,12 @@
       we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
+  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
   nodeinfo[node].Raise("Can't get data from node %s" % node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
-  free_mem = nodeinfo[node].payload.get("memory_free", None)
+  (_, _, (hv_info, )) = nodeinfo[node].payload
+
+  free_mem = hv_info.get("memory_free", None)
   if not isinstance(free_mem, int):
     raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                                " was '%s'" % (node, free_mem),
@@ -6070,12 +6086,13 @@
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
+  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
-    vg_free = info.payload.get("vg_free", None)
+    (_, (vg_info, ), _) = info.payload
+    vg_free = vg_info.get("vg_free", None)
     if not isinstance(vg_free, int):
       raise errors.OpPrereqError("Can't compute free disk space on node"
                                  " %s for vg %s, result was '%s'" %
@@ -6105,12 +6122,13 @@
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_name)
+  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
-    num_cpus = info.payload.get("cpu_total", None)
+    (_, _, (hv_info, )) = info.payload
+    num_cpus = hv_info.get("cpu_total", None)
     if not isinstance(num_cpus, int):
       raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                  " on node %s, result was '%s'" %
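All three helpers above now request exactly one volume group or hypervisor, so the matching slot of the payload must hold exactly one dict; the (vg_info, ) and (hv_info, ) patterns extract that dict and implicitly assert the length. A small illustration with invented values:

# Payload for a call that asked for one VG and one hypervisor.
payload = ("fake-boot-id",
           ({"vg_size": 409600, "vg_free": 102400}, ),
           ({"memory_free": 8192, "cpu_total": 8}, ))

# Unpacking with a one-element pattern raises ValueError if the node returned
# anything other than exactly one dict for that slot.
(_, _, (hv_info, )) = payload
(_, (vg_info, ), _) = payload

free_mem = hv_info.get("memory_free", None)   # 8192
vg_free = vg_info.get("vg_free", None)        # 102400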
@@ -7678,14 +7696,17 @@
 
     # Check for hypervisor version mismatch and warn the user.
     nodeinfo = self.rpc.call_node_info([source_node, target_node],
-                                       None, self.instance.hypervisor)
-    src_info = nodeinfo[source_node]
-    dst_info = nodeinfo[target_node]
-
-    if ((constants.HV_NODEINFO_KEY_VERSION in src_info.payload) and
-        (constants.HV_NODEINFO_KEY_VERSION in dst_info.payload)):
-      src_version = src_info.payload[constants.HV_NODEINFO_KEY_VERSION]
-      dst_version = dst_info.payload[constants.HV_NODEINFO_KEY_VERSION]
+                                       None, [self.instance.hypervisor])
+    for ninfo in nodeinfo.values():
+      ninfo.Raise("Unable to retrieve node information from node '%s'" %
+                  ninfo.node)
+    (_, _, (src_info, )) = nodeinfo[source_node].payload
+    (_, _, (dst_info, )) = nodeinfo[target_node].payload
+
+    if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
+        (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
+      src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
+      dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
       if src_version != dst_version:
         self.feedback_fn("* warning: hypervisor version mismatch between"
                          " source (%s) and target (%s) node" %
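With the reworked payload, the hypervisor version reported by a node lives inside the per-hypervisor dict rather than on the top-level payload, which is why the membership tests and lookups above now operate on the unpacked src_info and dst_info. A rough sketch of the comparison; the key name is stubbed locally (the real constant is constants.HV_NODEINFO_KEY_VERSION) and all values are invented:

# Local stand-in for constants.HV_NODEINFO_KEY_VERSION.
HV_VERSION_KEY = "hv_version"

src_payload = ("src-boot-id", (), ({HV_VERSION_KEY: "4.4.1"}, ))
dst_payload = ("dst-boot-id", (), ({HV_VERSION_KEY: "4.2.0"}, ))

(_, _, (src_info, )) = src_payload
(_, _, (dst_info, )) = dst_payload

if (HV_VERSION_KEY in src_info and HV_VERSION_KEY in dst_info
    and src_info[HV_VERSION_KEY] != dst_info[HV_VERSION_KEY]):
  print("* warning: hypervisor version mismatch between source (%s)"
        " and target (%s) node" % (src_info[HV_VERSION_KEY],
                                   dst_info[HV_VERSION_KEY]))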
@@ -11377,35 +11398,39 @@
       instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                   instance.hypervisor)
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                         instance.hypervisor)
+                                         [instance.hypervisor])
       pninfo = nodeinfo[pnode]
       msg = pninfo.fail_msg
       if msg:
         # Assume the primary node is unreachable and go ahead
         self.warn.append("Can't get info from primary node %s: %s" %
                          (pnode, msg))
-      elif not isinstance(pninfo.payload.get("memory_free", None), int):
-        self.warn.append("Node data from primary node %s doesn't contain"
-                         " free memory information" % pnode)
-      elif instance_info.fail_msg:
-        self.warn.append("Can't get instance runtime information: %s" %
-                         instance_info.fail_msg)
       else:
-        if instance_info.payload:
-          current_mem = int(instance_info.payload["memory"])
+        (_, _, (pnhvinfo, )) = pninfo.payload
+        if not isinstance(pnhvinfo.get("memory_free", None), int):
+          self.warn.append("Node data from primary node %s doesn't contain"
+                           " free memory information" % pnode)
+        elif instance_info.fail_msg:
+          self.warn.append("Can't get instance runtime information: %s" %
+                           instance_info.fail_msg)
         else:
-          # Assume instance not running
-          # (there is a slight race condition here, but it's not very probable,
-          # and we have no other way to check)
-          current_mem = 0
-        #TODO(dynmem): do the appropriate check involving MINMEM
-        miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
-                    pninfo.payload["memory_free"])
-        if miss_mem > 0:
-          raise errors.OpPrereqError("This change will prevent the instance"
-                                     " from starting, due to %d MB of memory"
-                                     " missing on its primary node" % miss_mem,
-                                     errors.ECODE_NORES)
+          if instance_info.payload:
+            current_mem = int(instance_info.payload["memory"])
+          else:
+            # Assume instance not running
+            # (there is a slight race condition here, but it's not very
+            # probable, and we have no other way to check)
+            # TODO: Describe race condition
+            current_mem = 0
+          #TODO(dynmem): do the appropriate check involving MINMEM
+          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
+                      pnhvinfo["memory_free"])
+          if miss_mem > 0:
+            raise errors.OpPrereqError("This change will prevent the instance"
+                                       " from starting, due to %d MB of memory"
+                                       " missing on its primary node" %
+                                       miss_mem,
+                                       errors.ECODE_NORES)
 
       if be_new[constants.BE_AUTO_BALANCE]:
         for node, nres in nodeinfo.items():
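The prerequisite check above reduces to simple arithmetic on the requested BE_MAXMEM, the memory the instance currently occupies, and the free memory the primary node reports. A worked example with invented numbers (all in MB):

be_maxmem = 4096      # requested BE_MAXMEM for the instance
current_mem = 1024    # memory the running instance occupies right now
memory_free = 2048    # "memory_free" reported by the primary node's hypervisor

# The node must cover the difference between the new maximum and what the
# instance already holds; anything left over is the shortfall.
miss_mem = be_maxmem - current_mem - memory_free
if miss_mem > 0:
  print("would refuse the change: %d MB missing on the primary node" % miss_mem)
# Here miss_mem == 1024, so the OpPrereqError path would be taken.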
@@ -11413,12 +11438,13 @@
             continue
           nres.Raise("Can't get info from secondary node %s" % node,
                      prereq=True, ecode=errors.ECODE_STATE)
-          if not isinstance(nres.payload.get("memory_free", None), int):
+          (_, _, (nhvinfo, )) = nres.payload
+          if not isinstance(nhvinfo.get("memory_free", None), int):
             raise errors.OpPrereqError("Secondary node %s didn't return free"
                                        " memory information" % node,
                                        errors.ECODE_STATE)
           #TODO(dynmem): do the appropriate check involving MINMEM
-          elif be_new[constants.BE_MAXMEM] > nres.payload["memory_free"]:
+          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
             raise errors.OpPrereqError("This change will prevent the instance"
                                        " from failover to its secondary node"
                                        " %s, due to not enough memory" % node,
@@ -13491,8 +13517,8 @@
     else:
       hypervisor_name = cluster_info.enabled_hypervisors[0]
 
-    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
-                                        hypervisor_name)
+    node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
+                                        [hypervisor_name])
     node_iinfo = \
       self.rpc.call_all_instances_info(node_list,
                                        cluster_info.enabled_hypervisors)
@@ -13565,7 +13591,7 @@
         nresult.Raise("Can't get data for node %s" % nname)
         node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                 nname)
-        remote_info = nresult.payload
+        remote_info = _MakeLegacyNodeInfo(nresult.payload)
 
         for attr in ["memory_total", "memory_free", "memory_dom0",
                      "vg_size", "vg_free", "cpu_total"]:
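The allocator data collection still consumes the flat key layout, which is why the structured payload is run through _MakeLegacyNodeInfo before the attribute loop. A small sketch of the kind of validation that loop header feeds into, with invented values and simplified error handling:

remote_info = {"bootid": "fake-boot-id",
               "memory_total": 16384, "memory_free": 8192, "memory_dom0": 1024,
               "vg_size": 409600, "vg_free": 102400, "cpu_total": 8}

for attr in ["memory_total", "memory_free", "memory_dom0",
             "vg_size", "vg_free", "cpu_total"]:
  if attr not in remote_info or not isinstance(remote_info[attr], int):
    raise RuntimeError("node did not return a valid integer for '%s'" % attr)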