"""
-# pylint: disable=E1103
+# pylint: disable=E1103,C0302
# E1103: %s %r has no %r member (but some types could not be
# inferred), because the _TryOSFromDisk returns either (True, os_obj)
# or (False, "string") which confuses pylint
+# C0302: This module has become too big and should be split up
+
import os
import os.path
_IES_CA_FILE = "ca"
#: Valid LVS output line regex
-_LVSLINE_REGEX = re.compile("^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")
+_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")
# Actions for the master setup script
_MASTER_START = "start"
raise errors.QuitGanetiException(True, "Shutdown scheduled")
-def _GetVgInfo(name, excl_stor):
+def _CheckStorageParams(params, num_params):
+ """Performs sanity checks for storage parameters.
+
+ @type params: list
+ @param params: list of storage parameters
+ @type num_params: int
+ @param num_params: expected number of parameters
+
+ """
+ if params is None:
+ raise errors.ProgrammerError("No storage parameters for storage"
+ " reporting is provided.")
+ if not isinstance(params, list):
+ raise errors.ProgrammerError("The storage parameters are not of type"
+ " list: '%s'" % params)
+ if not len(params) == num_params:
+    raise errors.ProgrammerError("Did not receive the expected number of"
+                                 " storage parameters: expected %s,"
+                                 " received '%s'" % (num_params, len(params)))
+
+
+def _CheckLvmStorageParams(params):
+ """Performs sanity check for the 'exclusive storage' flag.
+
+ @see: C{_CheckStorageParams}
+
+ """
+ _CheckStorageParams(params, 1)
+ excl_stor = params[0]
+ if not isinstance(params[0], bool):
+ raise errors.ProgrammerError("Exclusive storage parameter is not"
+ " boolean: '%s'." % excl_stor)
+ return excl_stor
+
+
+def _GetLvmVgSpaceInfo(name, params):
+ """Wrapper around C{_GetVgInfo} which checks the storage parameters.
+
+ @type name: string
+ @param name: name of the volume group
+ @type params: list
+ @param params: list of storage parameters, which in this case should be
+ containing only one for exclusive storage
+
+ """
+ excl_stor = _CheckLvmStorageParams(params)
+ return _GetVgInfo(name, excl_stor)
+
+
+def _GetVgInfo(
+ name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
"""Retrieves information about a LVM volume group.
"""
# TODO: GetVGInfo supports returning information for multiple VGs at once
- vginfo = bdev.LogicalVolume.GetVGInfo([name], excl_stor)
+ vginfo = info_fn([name], excl_stor)
if vginfo:
vg_free = int(round(vginfo[0][0], 0))
vg_size = int(round(vginfo[0][1], 0))
}
-def _GetVgSpindlesInfo(name, excl_stor):
+def _GetLvmPvSpaceInfo(name, params):
+ """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.
+
+ @see: C{_GetLvmVgSpaceInfo}
+
+ """
+ excl_stor = _CheckLvmStorageParams(params)
+ return _GetVgSpindlesInfo(name, excl_stor)
+
+
+def _GetVgSpindlesInfo(
+ name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
"""Retrieves information about spindles in an LVM volume group.
@type name: string
"""
if excl_stor:
- (vg_free, vg_size) = bdev.LogicalVolume.GetVgSpindlesInfo(name)
+ (vg_free, vg_size) = info_fn(name)
else:
vg_free = 0
vg_size = 0
return (bootid, storage_info, hv_info)
-# pylint: disable=W0613
-def _GetFileStorageSpaceInfo(path, *args):
+def _GetFileStorageSpaceInfo(path, params):
"""Wrapper around filestorage.GetSpaceInfo.
The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
parameters.
"""
+ _CheckStorageParams(params, 0)
return filestorage.GetFileStorageSpaceInfo(path)
constants.ST_DISKLESS: None,
constants.ST_EXT: None,
constants.ST_FILE: _GetFileStorageSpaceInfo,
- constants.ST_LVM_PV: _GetVgSpindlesInfo,
- constants.ST_LVM_VG: _GetVgInfo,
+ constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
+ constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
constants.ST_RADOS: None,
}
for bridge in what[constants.NV_BRIDGES]
if not utils.BridgeExists(bridge)]
- if what.get(constants.NV_FILE_STORAGE_PATHS) == my_name:
- result[constants.NV_FILE_STORAGE_PATHS] = \
- bdev.ComputeWrongFileStoragePaths()
+ if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
+ result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
+ filestorage.ComputeWrongFileStoragePaths()
+
+ if what.get(constants.NV_FILE_STORAGE_PATH):
+ pathresult = filestorage.CheckFileStoragePath(
+ what[constants.NV_FILE_STORAGE_PATH])
+ if pathresult:
+ result[constants.NV_FILE_STORAGE_PATH] = pathresult
+
+ if what.get(constants.NV_SHARED_FILE_STORAGE_PATH):
+ pathresult = filestorage.CheckFileStoragePath(
+ what[constants.NV_SHARED_FILE_STORAGE_PATH])
+ if pathresult:
+ result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult
return result
devices must be already assembled.
@type instance: L{objects.Instance}
- @param instance: the instance whose disks we shoul assemble
+ @param instance: the instance whose disks we should assemble
@rtype: list
@return: list of (disk_object, device_path)
raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
e.strerror)
- block_devices.append((disk, link_name))
+ block_devices.append((disk, link_name, device))
return block_devices
return result
-def BlockdevExport(disk, dest_node, dest_path, cluster_name):
+def BlockdevExport(disk, dest_node_ip, dest_path, cluster_name):
"""Export a block device to a remote node.
@type disk: L{objects.Disk}
@param disk: the description of the disk to export
- @type dest_node: str
- @param dest_node: the destination node to export to
+ @type dest_node_ip: str
+ @param dest_node_ip: the destination node IP to export to
@type dest_path: str
@param dest_path: the destination path on the target node
@type cluster_name: str
destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
" oflag=dsync", dest_path)
- remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
+ remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node_ip,
constants.SSH_LOGIN_USER,
destcmd)
if constants.HV_DISK_TYPE in instance.hvparams:
result["DISK_%d_FRONTEND_TYPE" % idx] = \
instance.hvparams[constants.HV_DISK_TYPE]
- if disk.dev_type in constants.LDS_BLOCK:
+ if disk.dev_type in constants.DTS_BLOCK:
result["DISK_%d_BACKEND_TYPE" % idx] = "block"
- elif disk.dev_type == constants.LD_FILE:
+ elif disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
result["DISK_%d_BACKEND_TYPE" % idx] = \
- "file:%s" % disk.physical_id[0]
+ "file:%s" % disk.logical_id[0]
# NICs
for idx, nic in enumerate(instance.nics):
@return: snapshot disk ID as (vg, lv)
"""
- if disk.dev_type == constants.LD_DRBD8:
+ if disk.dev_type == constants.DT_DRBD8:
if not disk.children:
_Fail("DRBD device '%s' without backing storage cannot be snapshotted",
disk.unique_id)
return BlockdevSnapshot(disk.children[0])
- elif disk.dev_type == constants.LD_LV:
+ elif disk.dev_type == constants.DT_PLAIN:
r_dev = _RecursiveFindBD(disk)
if r_dev is not None:
# FIXME: choose a saner value for the snapshot size
config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
("%s" % disk.iv_name))
config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
- ("%s" % disk.physical_id[1]))
+ ("%s" % disk.logical_id[1]))
config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
("%d" % disk.size))
"""Rename a list of block devices.
@type devlist: list of tuples
- @param devlist: list of tuples of the form (disk,
- new_logical_id, new_physical_id); disk is an
- L{objects.Disk} object describing the current disk,
- and new logical_id/physical_id is the name we
- rename it to
+ @param devlist: list of tuples of the form (disk, new_unique_id); disk is
+ an L{objects.Disk} object describing the current disk, and new
+ unique_id is the name we rename it to
@rtype: boolean
@return: True if all renames succeeded, False otherwise
@return: the normalized path if valid, None otherwise
"""
- if not (constants.ENABLE_FILE_STORAGE or
- constants.ENABLE_SHARED_FILE_STORAGE):
- _Fail("File storage disabled at configure time")
-
- bdev.CheckFileStoragePath(fs_dir)
+ filestorage.CheckFileStoragePath(fs_dir)
return os.path.normpath(fs_dir)
assert isinstance(disk_index, (int, long))
- real_disk = _OpenRealBD(disk)
-
inst_os = OSFromDisk(instance.os)
env = OSEnvironment(instance, inst_os)
script = inst_os.import_script
elif mode == constants.IEM_EXPORT:
+ real_disk = _OpenRealBD(disk)
env["EXPORT_DEVICE"] = real_disk.dev_path
env["EXPORT_INDEX"] = str(disk_index)
script = inst_os.export_script
shutil.rmtree(status_dir, ignore_errors=True)
-def _SetPhysicalId(target_node_uuid, nodes_ip, disks):
- """Sets the correct physical ID on all passed disks.
-
- """
- for cf in disks:
- cf.SetPhysicalID(target_node_uuid, nodes_ip)
+def _FindDisks(disks):
+ """Finds attached L{BlockDev}s for the given disks.
+ @type disks: list of L{objects.Disk}
+ @param disks: the disk objects we need to find
-def _FindDisks(target_node_uuid, nodes_ip, disks):
- """Sets the physical ID on disks and returns the block devices.
+  @return: list of L{BlockDev} objects or C{None} if a given disk
+    was not found or was not attached.
"""
- _SetPhysicalId(target_node_uuid, nodes_ip, disks)
-
bdevs = []
- for cf in disks:
- rd = _RecursiveFindBD(cf)
+ for disk in disks:
+ rd = _RecursiveFindBD(disk)
if rd is None:
- _Fail("Can't find device %s", cf)
+ _Fail("Can't find device %s", disk)
bdevs.append(rd)
return bdevs
-def DrbdDisconnectNet(target_node_uuid, nodes_ip, disks):
+def DrbdDisconnectNet(disks):
"""Disconnects the network on a list of drbd devices.
"""
- bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
+ bdevs = _FindDisks(disks)
# disconnect disks
for rd in bdevs:
err, exc=True)
-def DrbdAttachNet(target_node_uuid, nodes_ip, disks, instance_name,
- multimaster):
+def DrbdAttachNet(disks, instance_name, multimaster):
"""Attaches the network on a list of drbd devices.
"""
- bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
+ bdevs = _FindDisks(disks)
if multimaster:
for idx, rd in enumerate(bdevs):
_Fail("Can't change to primary mode: %s", err)
-def DrbdWaitSync(target_node_uuid, nodes_ip, disks):
+def DrbdWaitSync(disks):
"""Wait until DRBDs have synchronized.
"""
raise utils.RetryAgain()
return stats
- bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
+ bdevs = _FindDisks(disks)
min_resync = 100
alldone = True
return (alldone, min_resync)
-def DrbdNeedsActivation(target_node_uuid, nodes_ip, disks):
+def DrbdNeedsActivation(disks):
"""Checks which of the passed disks needs activation and returns their UUIDs.
"""
- _SetPhysicalId(target_node_uuid, nodes_ip, disks)
faulty_disks = []
for disk in disks:
utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)
+def ConfigureOVS(ovs_name, ovs_link):
+  """Creates an OpenvSwitch on the node.
+
+  This function sets up an OpenvSwitch on the node with the given name and
+ connects it via a given eth device.
+
+ @type ovs_name: string
+ @param ovs_name: Name of the OpenvSwitch to create.
+ @type ovs_link: None or string
+ @param ovs_link: Ethernet device for outside connection (can be missing)
+
+ """
+ # Initialize the OpenvSwitch
+ result = utils.RunCmd(["ovs-vsctl", "add-br", ovs_name])
+ if result.failed:
+    _Fail("Failed to create openvswitch %s. Script return value: %s, output:"
+          " '%s'" % (ovs_name, result.exit_code, result.output), log=True)
+
+ # And connect it to a physical interface, if given
+ if ovs_link:
+ result = utils.RunCmd(["ovs-vsctl", "add-port", ovs_name, ovs_link])
+ if result.failed:
+      _Fail("Failed to connect openvswitch to interface %s. Script return"
+            " value: %s, output: '%s'" % (ovs_link, result.exit_code,
+            result.output), log=True)
+
+
class HooksRunner(object):
"""Hook runner.