Revision 1a3c5d4e
b/htools/Ganeti/Rpc.hs | ||
---|---|---|
374 | 374 |
rpcCallData _ call = J.encode |
375 | 375 |
( rpcCallNodeInfoVolumeGroups call |
376 | 376 |
, rpcCallNodeInfoHypervisors call |
377 |
, False |
|
377 | 378 |
) |
378 | 379 |
|
379 | 380 |
instance Rpc RpcCallNodeInfo RpcResultNodeInfo where |
b/lib/backend.py | ||
---|---|---|
541 | 541 |
raise errors.QuitGanetiException(True, "Shutdown scheduled") |
542 | 542 |
|
543 | 543 |
|
544 |
def _GetVgInfo(name): |
|
544 |
def _GetVgInfo(name, excl_stor):
|
|
545 | 545 |
"""Retrieves information about a LVM volume group. |
546 | 546 |
|
547 | 547 |
""" |
548 | 548 |
# TODO: GetVGInfo supports returning information for multiple VGs at once |
549 |
vginfo = bdev.LogicalVolume.GetVGInfo([name]) |
|
549 |
vginfo = bdev.LogicalVolume.GetVGInfo([name], excl_stor)
|
|
550 | 550 |
if vginfo: |
551 | 551 |
vg_free = int(round(vginfo[0][0], 0)) |
552 | 552 |
vg_size = int(round(vginfo[0][1], 0)) |
... | ... | |
589 | 589 |
return map(fn, names) |
590 | 590 |
|
591 | 591 |
|
592 |
def GetNodeInfo(vg_names, hv_names): |
|
592 |
def GetNodeInfo(vg_names, hv_names, excl_stor):
|
|
593 | 593 |
"""Gives back a hash with different information about the node. |
594 | 594 |
|
595 | 595 |
@type vg_names: list of string |
596 | 596 |
@param vg_names: Names of the volume groups to ask for disk space information |
597 | 597 |
@type hv_names: list of string |
598 | 598 |
@param hv_names: Names of the hypervisors to ask for node information |
599 |
@type excl_stor: boolean |
|
600 |
@param excl_stor: Whether exclusive_storage is active |
|
599 | 601 |
@rtype: tuple; (string, None/dict, None/dict) |
600 | 602 |
@return: Tuple containing boot ID, volume group information and hypervisor |
601 | 603 |
information |
602 | 604 |
|
603 | 605 |
""" |
604 | 606 |
bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n") |
605 |
vg_info = _GetNamedNodeInfo(vg_names, _GetVgInfo)
|
|
607 |
vg_info = _GetNamedNodeInfo(vg_names, (lambda vg: _GetVgInfo(vg, excl_stor)))
|
|
606 | 608 |
hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo) |
607 | 609 |
|
608 | 610 |
return (bootid, vg_info, hv_info) |
b/lib/bdev.py | ||
---|---|---|
699 | 699 |
return data |
700 | 700 |
|
701 | 701 |
@classmethod |
702 |
def GetVGInfo(cls, vg_names, filter_readonly=True): |
|
702 |
def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
|
|
703 | 703 |
"""Get the free space info for specific VGs. |
704 | 704 |
|
705 | 705 |
@param vg_names: list of volume group names, if empty all will be returned |
706 |
@param excl_stor: whether exclusive_storage is enabled |
|
706 | 707 |
@param filter_readonly: whether to skip over readonly VGs |
707 | 708 |
|
708 | 709 |
@rtype: list |
... | ... | |
921 | 922 |
snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params) |
922 | 923 |
_IgnoreError(snap.Remove) |
923 | 924 |
|
924 |
vg_info = self.GetVGInfo([self._vg_name]) |
|
925 |
vg_info = self.GetVGInfo([self._vg_name], False)
|
|
925 | 926 |
if not vg_info: |
926 | 927 |
_ThrowError("Can't compute VG info for vg %s", self._vg_name) |
927 | 928 |
free_size, _, _ = vg_info[0] |
b/lib/cmdlib.py | ||
---|---|---|
5289 | 5289 |
toquery_nodes = [name for name in nodenames if all_info[name].vm_capable] |
5290 | 5290 |
|
5291 | 5291 |
node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()], |
5292 |
[lu.cfg.GetHypervisorType()]) |
|
5292 |
[lu.cfg.GetHypervisorType()], False)
|
|
5293 | 5293 |
live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload)) |
5294 | 5294 |
for (name, nresult) in node_data.items() |
5295 | 5295 |
if not nresult.fail_msg and nresult.payload) |
... | ... | |
6827 | 6827 |
we cannot check the node |
6828 | 6828 |
|
6829 | 6829 |
""" |
6830 |
nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name]) |
|
6830 |
nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
|
|
6831 | 6831 |
nodeinfo[node].Raise("Can't get data from node %s" % node, |
6832 | 6832 |
prereq=True, ecode=errors.ECODE_ENVIRON) |
6833 | 6833 |
(_, _, (hv_info, )) = nodeinfo[node].payload |
... | ... | |
6888 | 6888 |
or we cannot check the node |
6889 | 6889 |
|
6890 | 6890 |
""" |
6891 |
nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None) |
|
6891 |
nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None, False)
|
|
6892 | 6892 |
for node in nodenames: |
6893 | 6893 |
info = nodeinfo[node] |
6894 | 6894 |
info.Raise("Cannot get current information from node %s" % node, |
... | ... | |
6924 | 6924 |
or we cannot check the node |
6925 | 6925 |
|
6926 | 6926 |
""" |
6927 |
nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name]) |
|
6927 |
nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], False)
|
|
6928 | 6928 |
for node in nodenames: |
6929 | 6929 |
info = nodeinfo[node] |
6930 | 6930 |
info.Raise("Cannot get current information from node %s" % node, |
... | ... | |
8740 | 8740 |
|
8741 | 8741 |
# Check for hypervisor version mismatch and warn the user. |
8742 | 8742 |
nodeinfo = self.rpc.call_node_info([source_node, target_node], |
8743 |
None, [self.instance.hypervisor]) |
|
8743 |
None, [self.instance.hypervisor], False)
|
|
8744 | 8744 |
for ninfo in nodeinfo.values(): |
8745 | 8745 |
ninfo.Raise("Unable to retrieve node information from node '%s'" % |
8746 | 8746 |
ninfo.node) |
... | ... | |
13440 | 13440 |
instance_info = self.rpc.call_instance_info(pnode, instance.name, |
13441 | 13441 |
instance.hypervisor) |
13442 | 13442 |
nodeinfo = self.rpc.call_node_info(mem_check_list, None, |
13443 |
[instance.hypervisor]) |
|
13443 |
[instance.hypervisor], False)
|
|
13444 | 13444 |
pninfo = nodeinfo[pnode] |
13445 | 13445 |
msg = pninfo.fail_msg |
13446 | 13446 |
if msg: |
b/lib/masterd/iallocator.py | ||
---|---|---|
431 | 431 |
node_whitelist = None |
432 | 432 |
|
433 | 433 |
node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()], |
434 |
[hypervisor_name]) |
|
434 |
[hypervisor_name], False)
|
|
435 | 435 |
node_iinfo = \ |
436 | 436 |
self.rpc.call_all_instances_info(node_list, |
437 | 437 |
cluster_info.enabled_hypervisors) |
b/lib/rpc_defs.py | ||
---|---|---|
131 | 131 |
return result |
132 | 132 |
|
133 | 133 |
|
134 |
def _NodeInfoPreProc(node, args): |
|
135 |
"""Prepare the exclusive_storage argument for node_info calls.""" |
|
136 |
assert len(args) == 3 |
|
137 |
# The third argument is either a dictionary with one value for each node, or |
|
138 |
# a fixed value to be used for all the nodes |
|
139 |
if type(args[2]) is dict: |
|
140 |
return [args[0], args[1], args[2][node]] |
|
141 |
else: |
|
142 |
return args |
|
143 |
|
|
144 |
|
|
134 | 145 |
def _OsGetPostProc(result): |
135 | 146 |
"""Post-processor for L{rpc.RpcRunner.call_os_get}. |
136 | 147 |
|
... | ... | |
449 | 460 |
"Names of the volume groups to ask for disk space information"), |
450 | 461 |
("hv_names", None, |
451 | 462 |
"Names of the hypervisors to ask for node information"), |
452 |
], None, None, "Return node information"), |
|
463 |
("exclusive_storage", None, |
|
464 |
"Whether exclusive storage is enabled"), |
|
465 |
], _NodeInfoPreProc, None, "Return node information"), |
|
453 | 466 |
("node_verify", MULTI, None, constants.RPC_TMO_NORMAL, [ |
454 | 467 |
("checkdict", None, None), |
455 | 468 |
("cluster_name", None, None), |
b/lib/server/noded.py | ||
---|---|---|
687 | 687 |
"""Query node information. |
688 | 688 |
|
689 | 689 |
""" |
690 |
(vg_names, hv_names) = params |
|
691 |
return backend.GetNodeInfo(vg_names, hv_names) |
|
690 |
(vg_names, hv_names, excl_stor) = params
|
|
691 |
return backend.GetNodeInfo(vg_names, hv_names, excl_stor)
|
|
692 | 692 |
|
693 | 693 |
@staticmethod |
694 | 694 |
def perspective_etc_hosts_modify(params): |
Also available in: Unified diff