from ganeti import constants
from ganeti.storage import bdev
from ganeti.storage import drbd
+from ganeti.storage import filestorage
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
raise errors.QuitGanetiException(True, "Shutdown scheduled")
-def _GetVgInfo(name, excl_stor):
+def _CheckStorageParams(params, num_params):
+ """Performs sanity checks for storage parameters.
+
+ @type params: list
+ @param params: list of storage parameters
+ @type num_params: int
+ @param num_params: expected number of parameters
+
+ """
+ if params is None:
+ raise errors.ProgrammerError("No storage parameters for storage"
+ " reporting is provided.")
+ if not isinstance(params, list):
+ raise errors.ProgrammerError("The storage parameters are not of type"
+ " list: '%s'" % params)
+ if not len(params) == num_params:
+ raise errors.ProgrammerError("Did not receive the expected number of"
+ "storage parameters: expected %s,"
+ " received '%s'" % (num_params, len(params)))
+
+
def _CheckLvmStorageParams(params):
  """Performs sanity check for the 'exclusive storage' flag.

  @type params: list
  @param params: list of storage parameters; must contain exactly one
    boolean, the 'exclusive storage' flag
  @rtype: boolean
  @return: the 'exclusive storage' flag
  @raise errors.ProgrammerError: if the parameter list is malformed or the
    flag is not a boolean

  @see: C{_CheckStorageParams}

  """
  _CheckStorageParams(params, 1)
  excl_stor = params[0]
  # Check the bound name instead of re-indexing the list, so the value
  # that is validated is exactly the one that is returned.
  if not isinstance(excl_stor, bool):
    raise errors.ProgrammerError("Exclusive storage parameter is not"
                                 " boolean: '%s'." % excl_stor)
  return excl_stor
+
+
def _GetLvmVgSpaceInfo(name, params):
  """Wrapper around C{_GetVgInfo} which checks the storage parameters.

  The parameter list is validated (it must hold exactly the
  'exclusive storage' flag) before the volume group is queried.

  @type name: string
  @param name: name of the volume group
  @type params: list
  @param params: list of storage parameters, which in this case should be
    containing only one for exclusive storage

  """
  return _GetVgInfo(name, _CheckLvmStorageParams(params))
+
+
+def _GetVgInfo(
+ name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
"""Retrieves information about a LVM volume group.
"""
# TODO: GetVGInfo supports returning information for multiple VGs at once
- vginfo = bdev.LogicalVolume.GetVGInfo([name], excl_stor)
+ vginfo = info_fn([name], excl_stor)
if vginfo:
vg_free = int(round(vginfo[0][0], 0))
vg_size = int(round(vginfo[0][1], 0))
vg_size = None
return {
+ "type": constants.ST_LVM_VG,
"name": name,
- "vg_free": vg_free,
- "vg_size": vg_size,
+ "storage_free": vg_free,
+ "storage_size": vg_size,
}
-def _GetVgSpindlesInfo(name, excl_stor):
def _GetLvmPvSpaceInfo(name, params):
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.

  The storage parameters are validated (a single 'exclusive storage'
  boolean) before the spindle information is retrieved.

  @see: C{_GetLvmVgSpaceInfo}

  """
  return _GetVgSpindlesInfo(name, _CheckLvmStorageParams(params))
+
+
+def _GetVgSpindlesInfo(
+ name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
"""Retrieves information about spindles in an LVM volume group.
@type name: string
"""
if excl_stor:
- (vg_free, vg_size) = bdev.LogicalVolume.GetVgSpindlesInfo(name)
+ (vg_free, vg_size) = info_fn(name)
else:
vg_free = 0
vg_size = 0
return {
+ "type": constants.ST_LVM_PV,
"name": name,
- "vg_free": vg_free,
- "vg_size": vg_size,
+ "storage_free": vg_free,
+ "storage_size": vg_size,
}
-def _GetHvInfo(name):
+def _GetHvInfo(name, hvparams, get_hv_fn=hypervisor.GetHypervisor):
"""Retrieves node information from a hypervisor.
The information returned depends on the hypervisor. Common items:
- memory_total is the total number of ram in MiB
- hv_version: the hypervisor version, if available
+ @type hvparams: dict of string
+ @param hvparams: the hypervisor's hvparams
+
+ """
+ return get_hv_fn(name).GetNodeInfo(hvparams=hvparams)
+
+
def _GetHvInfoAll(hv_specs, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information for all hypervisors.

  See C{_GetHvInfo} for information on the output.

  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor object for a given
    hypervisor name; parameter to improve testability
  @return: list with one C{_GetHvInfo} result per spec, or None if
    C{hv_specs} is None

  """
  if hv_specs is None:
    return None

  return [_GetHvInfo(hvname, hvparams, get_hv_fn)
          for hvname, hvparams in hv_specs]
def _GetNamedNodeInfo(names, fn):
return map(fn, names)
-def GetNodeInfo(storage_units, hv_names, excl_stor):
+def GetNodeInfo(storage_units, hv_specs):
"""Gives back a hash with different information about the node.
- @type storage_units: list of pairs (string, string)
- @param storage_units: List of pairs (storage unit, identifier) to ask for disk
- space information. In case of lvm-vg, the identifier is
- the VG name.
- @type hv_names: list of string
- @param hv_names: Names of the hypervisors to ask for node information
- @type excl_stor: boolean
- @param excl_stor: Whether exclusive_storage is active
+ @type storage_units: list of tuples (string, string, list)
+ @param storage_units: List of tuples (storage unit, identifier, parameters) to
+ ask for disk space information. In case of lvm-vg, the identifier is
+ the VG name. The parameters can contain additional, storage-type-specific
+ parameters, for example exclusive storage for lvm storage.
+ @type hv_specs: list of pairs (string, dict of strings)
+ @param hv_specs: list of pairs of a hypervisor's name and its hvparams
@rtype: tuple; (string, None/dict, None/dict)
@return: Tuple containing boot ID, volume group information and hypervisor
information
bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
storage_info = _GetNamedNodeInfo(
storage_units,
- (lambda storage_unit: _ApplyStorageInfoFunction(storage_unit[0],
- storage_unit[1],
- excl_stor)))
- hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo)
-
+ (lambda (storage_type, storage_key, storage_params):
+ _ApplyStorageInfoFunction(storage_type, storage_key, storage_params)))
+ hv_info = _GetHvInfoAll(hv_specs)
return (bootid, storage_info, hv_info)
def _GetFileStorageSpaceInfo(path, params):
  """Wrapper around C{filestorage.GetFileStorageSpaceInfo}.

  The purpose of this wrapper is to check the storage parameters (which
  must be an empty list for file storage) before calling
  C{filestorage.GetFileStorageSpaceInfo}, so that no extra parameters
  leak into the filestorage module's code.

  @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
    parameters.

  """
  _CheckStorageParams(params, 0)
  return filestorage.GetFileStorageSpaceInfo(path)
+
+
# FIXME: implement storage reporting for all missing storage types.
_STORAGE_TYPE_INFO_FN = {
constants.ST_BLOCK: None,
constants.ST_DISKLESS: None,
constants.ST_EXT: None,
- constants.ST_FILE: None,
- constants.ST_LVM_PV: _GetVgSpindlesInfo,
- constants.ST_LVM_VG: _GetVgInfo,
+ constants.ST_FILE: _GetFileStorageSpaceInfo,
+ constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
+ constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
constants.ST_RADOS: None,
}
return res
-def VerifyNode(what, cluster_name):
def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
                       get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hypervisors. Adds the verification outcome to C{result}.

  Does nothing unless the node is vm-capable and hypervisor verification
  was requested (C{constants.NV_HYPERVISOR} key present in C{what}).

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here (under
    C{constants.NV_HYPERVISOR}, mapping hypervisor name to its
    verification result or to an error string)
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  if not vm_capable:
    return

  if constants.NV_HYPERVISOR in what:
    result[constants.NV_HYPERVISOR] = {}
    for hv_name in what[constants.NV_HYPERVISOR]:
      hvparams = all_hvparams[hv_name]
      try:
        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
      except errors.HypervisorError, err:
        # Report the failure instead of aborting the whole verification
        val = "Error while checking hypervisor: %s" % str(err)
      result[constants.NV_HYPERVISOR][hv_name] = val
+
+
def _VerifyHvparams(what, vm_capable, result,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hvparams. Adds the verification outcome to C{result}.

  Does nothing unless the node is vm-capable and hvparams verification
  was requested (C{constants.NV_HVPARAMS} key present in C{what}).

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here (under
    C{constants.NV_HVPARAMS}, as a list of (source, hv_name, error)
    tuples for the parameter sets that failed validation)
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  if not vm_capable:
    return

  if constants.NV_HVPARAMS in what:
    result[constants.NV_HVPARAMS] = []
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
      try:
        logging.info("Validating hv %s, %s", hv_name, hvparms)
        get_hv_fn(hv_name).ValidateParameters(hvparms)
      except errors.HypervisorError, err:
        # Only failed validations are recorded; success leaves no entry
        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
+
+
def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
  """Verifies the instance list.

  Does nothing unless the node is vm-capable and an instance-list check
  was requested (C{constants.NV_INSTANCELIST} key present in C{what}).

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here (under
    C{constants.NV_INSTANCELIST}, either the instance list or the error
    message if retrieving it failed)
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_INSTANCELIST in what and vm_capable:
    # GetInstanceList can fail; store the error string as the result then
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
                            all_hvparams=all_hvparams)
    except RPCFail, err:
      val = str(err)
    result[constants.NV_INSTANCELIST] = val
+
+
def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
  """Verifies the node info.

  Queries node information from the single hypervisor named in
  C{what[constants.NV_HVINFO]}, if the node is vm-capable and such a
  check was requested.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here (under
    C{constants.NV_HVINFO})
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_HVINFO in what and vm_capable:
    hvname = what[constants.NV_HVINFO]
    hyper = hypervisor.GetHypervisor(hvname)
    hvparams = all_hvparams[hvname]
    result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
+
+
+def VerifyNode(what, cluster_name, all_hvparams):
"""Verify the status of the local node.
Based on the input L{what} parameter, various checks are done on the
- node-net-test: list of nodes we should check node daemon port
connectivity with
- hypervisor: list with hypervisors to run the verify for
+ @type cluster_name: string
+ @param cluster_name: the cluster's name
+ @type all_hvparams: dict of dict of strings
+ @param all_hvparams: a dictionary mapping hypervisor names to hvparams
@rtype: dict
@return: a dictionary with the same keys as the input dict, and
values representing the result of the checks
port = netutils.GetDaemonPort(constants.NODED)
vm_capable = my_name not in what.get(constants.NV_VMNODES, [])
- if constants.NV_HYPERVISOR in what and vm_capable:
- result[constants.NV_HYPERVISOR] = tmp = {}
- for hv_name in what[constants.NV_HYPERVISOR]:
- try:
- val = hypervisor.GetHypervisor(hv_name).Verify()
- except errors.HypervisorError, err:
- val = "Error while checking hypervisor: %s" % str(err)
- tmp[hv_name] = val
-
- if constants.NV_HVPARAMS in what and vm_capable:
- result[constants.NV_HVPARAMS] = tmp = []
- for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
- try:
- logging.info("Validating hv %s, %s", hv_name, hvparms)
- hypervisor.GetHypervisor(hv_name).ValidateParameters(hvparms)
- except errors.HypervisorError, err:
- tmp.append((source, hv_name, str(err)))
+ _VerifyHypervisors(what, vm_capable, result, all_hvparams)
+ _VerifyHvparams(what, vm_capable, result)
if constants.NV_FILELIST in what:
fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
val = str(err)
result[constants.NV_LVLIST] = val
- if constants.NV_INSTANCELIST in what and vm_capable:
- # GetInstanceList can fail
- try:
- val = GetInstanceList(what[constants.NV_INSTANCELIST])
- except RPCFail, err:
- val = str(err)
- result[constants.NV_INSTANCELIST] = val
+ _VerifyInstanceList(what, vm_capable, result, all_hvparams)
if constants.NV_VGLIST in what and vm_capable:
result[constants.NV_VGLIST] = utils.ListVolumeGroups()
result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
constants.RELEASE_VERSION)
- if constants.NV_HVINFO in what and vm_capable:
- hyper = hypervisor.GetHypervisor(what[constants.NV_HVINFO])
- result[constants.NV_HVINFO] = hyper.GetNodeInfo()
+ _VerifyNodeInfo(what, vm_capable, result, all_hvparams)
if constants.NV_DRBDVERSION in what and vm_capable:
try:
_Fail("Missing bridges %s", utils.CommaJoin(missing))
def GetInstanceListForHypervisor(hname, hvparams=None,
                                 get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances of the given hypervisor.

  @type hname: string
  @param hname: name of the hypervisor
  @type hvparams: dict of strings
  @param hvparams: hypervisor parameters for the given hypervisor
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
    name; optional parameter to increase testability

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  try:
    hv = get_hv_fn(hname)
    names = hv.ListInstances(hvparams=hvparams)
    results.extend(names)
  except errors.HypervisorError, err:
    # Enumeration failures are fatal for this call: report via _Fail
    # (which raises) instead of returning a partial list
    _Fail("Error enumerating instances (hypervisor %s): %s",
          hname, err, exc=True)
  return results
+
+
def GetInstanceList(hypervisor_list, all_hvparams=None,
get_hv_fn=hypervisor.GetHypervisor):
"""Provides a list of instances.
"""
results = []
for hname in hypervisor_list:
- try:
- hvparams = None
- if all_hvparams is not None:
- hvparams = all_hvparams[hname]
- hv = get_hv_fn(hname)
- names = hv.ListInstances(hvparams)
- results.extend(names)
- except errors.HypervisorError, err:
- _Fail("Error enumerating instances (hypervisor %s): %s",
- hname, err, exc=True)
-
+ hvparams = all_hvparams[hname]
+ results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
+ get_hv_fn=get_hv_fn))
return results
-def GetInstanceInfo(instance, hname):
+def GetInstanceInfo(instance, hname, hvparams=None):
"""Gives back the information about an instance as a dictionary.
@type instance: string
@param instance: the instance name
@type hname: string
@param hname: the hypervisor type of the instance
+ @type hvparams: dict of strings
+ @param hvparams: the instance's hvparams
@rtype: dict
@return: dictionary with the following keys:
"""
output = {}
- iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
+ iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance,
+ hvparams=hvparams)
if iinfo is not None:
output["memory"] = iinfo[2]
output["vcpus"] = iinfo[3]
def GetInstanceMigratable(instance):
- """Gives whether an instance can be migrated.
+ """Computes whether an instance can be migrated.
@type instance: L{objects.Instance}
@param instance: object representing the instance to be checked.
"""
hyper = hypervisor.GetHypervisor(instance.hypervisor)
iname = instance.name
- if iname not in hyper.ListInstances():
+ if iname not in hyper.ListInstances(instance.hvparams):
_Fail("Instance %s is not running", iname)
for idx in range(len(instance.disks)):
iname, link_name, idx)
-def GetAllInstancesInfo(hypervisor_list):
+def GetAllInstancesInfo(hypervisor_list, all_hvparams):
"""Gather data about all instances.
This is the equivalent of L{GetInstanceInfo}, except that it
@type hypervisor_list: list
@param hypervisor_list: list of hypervisors to query for instance data
+ @type all_hvparams: dict of dict of strings
+ @param all_hvparams: mapping of hypervisor names to hvparams
@rtype: dict
@return: dictionary of instance: data, with data having the following keys:
output = {}
for hname in hypervisor_list:
- iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo()
+ hvparams = all_hvparams[hname]
+ iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(hvparams)
if iinfo:
for name, _, memory, vcpus, state, times in iinfo:
value = {
@rtype: None
"""
- running_instances = GetInstanceList([instance.hypervisor])
+ running_instances = GetInstanceListForHypervisor(instance.hypervisor,
+ instance.hvparams)
if instance.name in running_instances:
logging.info("Instance %s already running, not starting", instance.name)
hyper = hypervisor.GetHypervisor(hv_name)
iname = instance.name
- if instance.name not in hyper.ListInstances():
+ if instance.name not in hyper.ListInstances(instance.hvparams):
logging.info("Instance %s not running, doing nothing", iname)
return
self.tried_once = False
def __call__(self):
- if iname not in hyper.ListInstances():
+ if iname not in hyper.ListInstances(instance.hvparams):
return
try:
if store_reason:
_StoreInstReasonTrail(instance.name, reason)
except errors.HypervisorError, err:
- if iname not in hyper.ListInstances():
+ if iname not in hyper.ListInstances(instance.hvparams):
# if the instance is no longer existing, consider this a
# success and go to cleanup
return
try:
hyper.StopInstance(instance, force=True)
except errors.HypervisorError, err:
- if iname in hyper.ListInstances():
+ if iname in hyper.ListInstances(instance.hvparams):
# only raise an error if the instance still exists, otherwise
# the error could simply be "instance ... unknown"!
_Fail("Failed to force stop instance %s: %s", iname, err)
time.sleep(1)
- if iname in hyper.ListInstances():
+ if iname in hyper.ListInstances(instance.hvparams):
_Fail("Could not shutdown instance %s even by destroy", iname)
try:
@rtype: None
"""
- running_instances = GetInstanceList([instance.hypervisor])
+ running_instances = GetInstanceListForHypervisor(instance.hypervisor,
+ instance.hvparams)
if instance.name not in running_instances:
_Fail("Cannot reboot instance %s that is not running", instance.name)
"""
hyper = hypervisor.GetHypervisor(instance.hypervisor)
- running = hyper.ListInstances()
+ running = hyper.ListInstances(instance.hvparams)
if instance.name not in running:
logging.info("Instance %s is not running, cannot balloon", instance.name)
return
_Fail("Failed to finalize migration on the target node: %s", err, exc=True)
-def MigrateInstance(instance, target, live):
+def MigrateInstance(cluster_name, instance, target, live):
"""Migrates an instance to another node.
+ @type cluster_name: string
+ @param cluster_name: name of the cluster
@type instance: L{objects.Instance}
@param instance: the instance definition
@type target: string
hyper = hypervisor.GetHypervisor(instance.hypervisor)
try:
- hyper.MigrateInstance(instance, target, live)
+ hyper.MigrateInstance(cluster_name, instance, target, live)
except errors.HypervisorError, err:
_Fail("Failed to migrate instance: %s", err, exc=True)
real_disk = _OpenRealBD(disk)
result["DISK_%d_PATH" % idx] = real_disk.dev_path
result["DISK_%d_ACCESS" % idx] = disk.mode
+ result["DISK_%d_UUID" % idx] = disk.uuid
+ if disk.name:
+ result["DISK_%d_NAME" % idx] = disk.name
if constants.HV_DISK_TYPE in instance.hvparams:
result["DISK_%d_FRONTEND_TYPE" % idx] = \
instance.hvparams[constants.HV_DISK_TYPE]
# NICs
for idx, nic in enumerate(instance.nics):
result["NIC_%d_MAC" % idx] = nic.mac
+ result["NIC_%d_UUID" % idx] = nic.uuid
+ if nic.name:
+ result["NIC_%d_NAME" % idx] = nic.name
if nic.ip:
result["NIC_%d_IP" % idx] = nic.ip
result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
return result
-def BlockdevGrow(disk, amount, dryrun, backingstore):
+def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
"""Grow a stack of block devices.
This function is called recursively, with the childrens being the
only, or on "logical" storage only; e.g. DRBD is logical storage,
whereas LVM, file, RBD are backing storage
@rtype: (status, result)
+ @type excl_stor: boolean
+ @param excl_stor: Whether exclusive_storage is active
@return: a tuple with the status of the operation (True/False), and
the errors message if status is False
_Fail("Cannot find block device %s", disk)
try:
- r_dev.Grow(amount, dryrun, backingstore)
+ r_dev.Grow(amount, dryrun, backingstore, excl_stor)
except errors.BlockDeviceError, err:
_Fail("Failed to grow block device: %s", err, exc=True)
shutil.rmtree(status_dir, ignore_errors=True)
-def _FindDisks(nodes_ip, disks):
- """Sets the physical ID on disks and returns the block devices.
+def _SetPhysicalId(target_node_uuid, nodes_ip, disks):
+ """Sets the correct physical ID on all passed disks.
"""
- # set the correct physical ID
- my_name = netutils.Hostname.GetSysName()
for cf in disks:
- cf.SetPhysicalID(my_name, nodes_ip)
+ cf.SetPhysicalID(target_node_uuid, nodes_ip)
+
+
+def _FindDisks(target_node_uuid, nodes_ip, disks):
+ """Sets the physical ID on disks and returns the block devices.
+
+ """
+ _SetPhysicalId(target_node_uuid, nodes_ip, disks)
bdevs = []
return bdevs
-def DrbdDisconnectNet(nodes_ip, disks):
+def DrbdDisconnectNet(target_node_uuid, nodes_ip, disks):
"""Disconnects the network on a list of drbd devices.
"""
- bdevs = _FindDisks(nodes_ip, disks)
+ bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
# disconnect disks
for rd in bdevs:
err, exc=True)
-def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
+def DrbdAttachNet(target_node_uuid, nodes_ip, disks, instance_name,
+ multimaster):
"""Attaches the network on a list of drbd devices.
"""
- bdevs = _FindDisks(nodes_ip, disks)
+ bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
if multimaster:
for idx, rd in enumerate(bdevs):
_Fail("Can't change to primary mode: %s", err)
-def DrbdWaitSync(nodes_ip, disks):
+def DrbdWaitSync(target_node_uuid, nodes_ip, disks):
"""Wait until DRBDs have synchronized.
"""
raise utils.RetryAgain()
return stats
- bdevs = _FindDisks(nodes_ip, disks)
+ bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
min_resync = 100
alldone = True
return (alldone, min_resync)
def DrbdNeedsActivation(target_node_uuid, nodes_ip, disks):
  """Checks which of the passed disks needs activation and returns their UUIDs.

  A disk needs activation when its block device cannot be found at all,
  or when its DRBD status is standalone or diskless.

  """
  _SetPhysicalId(target_node_uuid, nodes_ip, disks)

  faulty_uuids = []
  for disk in disks:
    rdev = _RecursiveFindBD(disk)
    if rdev is None:
      # Device not found: definitely needs activation
      faulty_uuids.append(disk.uuid)
      continue

    status = rdev.GetProcStatus()
    if status.is_standalone or status.is_diskless:
      faulty_uuids.append(disk.uuid)

  return faulty_uuids
+
+
def GetDrbdUsermodeHelper():
"""Returns DRBD usermode helper currently configured.
_Fail(str(err))
-def PowercycleNode(hypervisor_type):
+def PowercycleNode(hypervisor_type, hvparams=None):
"""Hard-powercycle the node.
Because we need to return first, and schedule the powercycle in the
except Exception: # pylint: disable=W0703
pass
time.sleep(5)
- hyper.PowercycleNode()
+ hyper.PowercycleNode(hvparams=hvparams)
def _VerifyRestrictedCmdName(cmd):