#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import ssh
from ganeti import hypervisor
from ganeti import constants
-from ganeti import bdev
+from ganeti.storage import bdev
+from ganeti.storage import drbd
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import netutils
from ganeti import runtime
-from ganeti import mcpu
from ganeti import compat
from ganeti import pathutils
from ganeti import vcluster
from ganeti import ht
+from ganeti.storage.base import BlockDev
+from ganeti.storage.drbd import DRBD8
+from ganeti import hooksmaster
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
_MASTER_START = "start"
_MASTER_STOP = "stop"
-#: Maximum file permissions for remote command directory and executables
+#: Maximum file permissions for restricted command directory and executables
_RCMD_MAX_MODE = (stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
-#: Delay before returning an error for remote commands
+#: Delay before returning an error for restricted commands
_RCMD_INVALID_DELAY = 10
-#: How long to wait to acquire lock for remote commands (shorter than
+#: How long to wait to acquire lock for restricted commands (shorter than
#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
#: command requests arrive
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
"""
+def _GetInstReasonFilename(instance_name):
+  """Path of the file containing the reason of the instance status change.
+
+  The file lives in L{pathutils.INSTANCE_REASON_DIR} and is named after
+  the instance.
+
+  @type instance_name: string
+  @param instance_name: The name of the instance
+  @rtype: string
+  @return: The path of the file
+
+  """
+  return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
+
+
+def _StoreInstReasonTrail(instance_name, trail):
+  """Serialize a reason trail related to an instance change of state to file.
+
+  The exact location of the file depends on the name of the instance and on
+  the configuration of the Ganeti cluster defined at deploy time.
+
+  @type instance_name: string
+  @param instance_name: The name of the instance
+  @type trail: list
+  @param trail: the reason trail to store; serialized to the file as JSON
+  @rtype: None
+
+  """
+  json = serializer.DumpJson(trail)
+  filename = _GetInstReasonFilename(instance_name)
+  utils.WriteFile(filename, data=json)
+
+
def _Fail(msg, *args, **kwargs):
"""Log an error and the raise an RPCFail exception.
cfg = _GetConfig()
hr = HooksRunner()
- hm = mcpu.HooksMaster(hook_opcode, hooks_path, nodes, hr.RunLocalHooks,
- None, env_fn, logging.warning, cfg.GetClusterName(),
- cfg.GetMasterNode())
-
+ hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
+ hr.RunLocalHooks, None, env_fn,
+ logging.warning, cfg.GetClusterName(),
+ cfg.GetMasterNode())
hm.RunPhase(constants.HOOKS_PHASE_PRE)
result = fn(*args, **kwargs)
hm.RunPhase(constants.HOOKS_PHASE_POST)
result = utils.RunCmd([setup_script, action], env=env, reset_env=True)
if result.failed:
- _Fail("Failed to %s the master IP. Script return value: %s" %
- (action, result.exit_code), log=True)
+ _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
+ (action, result.exit_code, result.output), log=True)
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
}
+def _GetVgSpindlesInfo(name, excl_stor):
+  """Retrieves information about spindles in an LVM volume group.
+
+  @type name: string
+  @param name: VG name
+  @type excl_stor: bool
+  @param excl_stor: exclusive storage
+  @rtype: dict
+  @return: dictionary whose keys are "name", "vg_free", "vg_size" for VG name,
+      free spindles, total spindles respectively
+
+  """
+  if excl_stor:
+    (vg_free, vg_size) = bdev.LogicalVolume.GetVgSpindlesInfo(name)
+  else:
+    # Spindles are only tracked with exclusive storage enabled; report
+    # zeroes otherwise so callers always receive a complete dictionary.
+    vg_free = 0
+    vg_size = 0
+  return {
+    "name": name,
+    "vg_free": vg_free,
+    "vg_size": vg_size,
+    }
+
+
def _GetHvInfo(name):
"""Retrieves node information from a hypervisor.
return map(fn, names)
-def GetNodeInfo(vg_names, hv_names, excl_stor):
+def GetNodeInfo(storage_units, hv_names, excl_stor):
"""Gives back a hash with different information about the node.
- @type vg_names: list of string
- @param vg_names: Names of the volume groups to ask for disk space information
+ @type storage_units: list of pairs (string, string)
+ @param storage_units: List of pairs (storage unit, identifier) to ask for disk
+ space information. In case of lvm-vg, the identifier is
+ the VG name.
@type hv_names: list of string
@param hv_names: Names of the hypervisors to ask for node information
@type excl_stor: boolean
"""
bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
- vg_info = _GetNamedNodeInfo(vg_names, (lambda vg: _GetVgInfo(vg, excl_stor)))
+ storage_info = _GetNamedNodeInfo(
+ storage_units,
+ (lambda storage_unit: _ApplyStorageInfoFunction(storage_unit[0],
+ storage_unit[1],
+ excl_stor)))
hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo)
- return (bootid, vg_info, hv_info)
+ return (bootid, storage_info, hv_info)
+
+
+# FIXME: implement storage reporting for all missing storage types.
+_STORAGE_TYPE_INFO_FN = {
+ constants.ST_BLOCK: None,
+ constants.ST_DISKLESS: None,
+ constants.ST_EXT: None,
+ constants.ST_FILE: None,
+ constants.ST_LVM_PV: _GetVgSpindlesInfo,
+ constants.ST_LVM_VG: _GetVgInfo,
+ constants.ST_RADOS: None,
+}
+
+
+def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
+  """Looks up and applies the correct function to calculate free and total
+  storage for the given storage type.
+
+  @type storage_type: string
+  @param storage_type: the storage type for which the storage shall be reported.
+  @type storage_key: string
+  @param storage_key: identifier of a storage unit, e.g. the volume group name
+      of an LVM storage unit
+  @type args: any
+  @param args: various parameters that can be used for storage reporting. These
+      parameters and their semantics vary from storage type to storage type and
+      are just propagated in this function.
+  @return: the results of the application of the storage space function (see
+      _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
+      storage type
+  @raise NotImplementedError: for storage types that don't support space
+      reporting yet
+
+  """
+  # An unknown storage_type raises KeyError here, which is distinct from
+  # the NotImplementedError used for known-but-unimplemented types.
+  fn = _STORAGE_TYPE_INFO_FN[storage_type]
+  if fn is not None:
+    return fn(storage_key, *args)
+  else:
+    raise NotImplementedError
+
+
+def _CheckExclusivePvs(pvi_list):
+  """Check that PVs are not shared among LVs
+
+  @type pvi_list: list of L{objects.LvmPvInfo} objects
+  @param pvi_list: information about the PVs
+
+  @rtype: list of tuples (string, list of strings)
+  @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])
+
+  """
+  res = []
+  for pvi in pvi_list:
+    if len(pvi.lv_list) > 1:
+      # a PV hosting more than one LV breaks the exclusive-storage invariant
+      res.append((pvi.name, pvi.lv_list))
+  return res
+
+
+def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
+                       get_hv_fn=hypervisor.GetHypervisor):
+  """Verifies the hypervisors. Adds the verification results to the
+  'result' dict.
+
+  @type what: C{dict}
+  @param what: a dictionary of things to check
+  @type vm_capable: boolean
+  @param vm_capable: whether or not this node is vm capable
+  @type result: dict
+  @param result: dictionary of verification results; results of the
+    verifications in this function will be added here
+  @type all_hvparams: dict of dict of string
+  @param all_hvparams: dictionary mapping hypervisor names to hvparams
+  @type get_hv_fn: function
+  @param get_hv_fn: function to retrieve the hypervisor, to improve testability
+
+  """
+  if not vm_capable:
+    return
+
+  if constants.NV_HYPERVISOR in what:
+    result[constants.NV_HYPERVISOR] = {}
+    for hv_name in what[constants.NV_HYPERVISOR]:
+      hvparams = all_hvparams[hv_name]
+      try:
+        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
+      except errors.HypervisorError, err:
+        # report the error as the result rather than failing node verification
+        val = "Error while checking hypervisor: %s" % str(err)
+      result[constants.NV_HYPERVISOR][hv_name] = val
+
+
+def _VerifyHvparams(what, vm_capable, result,
+                    get_hv_fn=hypervisor.GetHypervisor):
+  """Verifies the hvparams. Adds the verification results to the
+  'result' dict.
+
+  @type what: C{dict}
+  @param what: a dictionary of things to check
+  @type vm_capable: boolean
+  @param vm_capable: whether or not this node is vm capable
+  @type result: dict
+  @param result: dictionary of verification results; results of the
+    verifications in this function will be added here
+  @type get_hv_fn: function
+  @param get_hv_fn: function to retrieve the hypervisor, to improve testability
+
+  """
+  if not vm_capable:
+    return
+
+  if constants.NV_HVPARAMS in what:
+    result[constants.NV_HVPARAMS] = []
+    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
+      try:
+        logging.info("Validating hv %s, %s", hv_name, hvparms)
+        get_hv_fn(hv_name).ValidateParameters(hvparms)
+      except errors.HypervisorError, err:
+        # only failures are recorded; a valid hvparams set adds no entry
+        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
+
+
+def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
+  """Verifies the instance list. Adds the verification results to the
+  'result' dict.
+
+  @type what: C{dict}
+  @param what: a dictionary of things to check
+  @type vm_capable: boolean
+  @param vm_capable: whether or not this node is vm capable
+  @type result: dict
+  @param result: dictionary of verification results; results of the
+    verifications in this function will be added here
+  @type all_hvparams: dict of dict of string
+  @param all_hvparams: dictionary mapping hypervisor names to hvparams
+
+  """
+  if constants.NV_INSTANCELIST in what and vm_capable:
+    # GetInstanceList can fail
+    try:
+      val = GetInstanceList(what[constants.NV_INSTANCELIST],
+                            all_hvparams=all_hvparams)
+    except RPCFail, err:
+      # on failure, the result value is the error string, not a list
+      val = str(err)
+    result[constants.NV_INSTANCELIST] = val
+
+def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
+  """Verifies the node info. Adds the verification results to the
+  'result' dict.
-def VerifyNode(what, cluster_name):
+  @type what: C{dict}
+  @param what: a dictionary of things to check
+  @type vm_capable: boolean
+  @param vm_capable: whether or not this node is vm capable
+  @type result: dict
+  @param result: dictionary of verification results; results of the
+    verifications in this function will be added here
+  @type all_hvparams: dict of dict of string
+  @param all_hvparams: dictionary mapping hypervisor names to hvparams
+
+  """
+  if constants.NV_HVINFO in what and vm_capable:
+    hvname = what[constants.NV_HVINFO]
+    hyper = hypervisor.GetHypervisor(hvname)
+    # look up the hypervisor's own parameters before querying it
+    hvparams = all_hvparams[hvname]
+    result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
+
+
+def VerifyNode(what, cluster_name, all_hvparams):
"""Verify the status of the local node.
Based on the input L{what} parameter, various checks are done on the
- node-net-test: list of nodes we should check node daemon port
connectivity with
- hypervisor: list with hypervisors to run the verify for
+ @type cluster_name: string
+ @param cluster_name: the cluster's name
+ @type all_hvparams: dict of dict of strings
+ @param all_hvparams: a dictionary mapping hypervisor names to hvparams
@rtype: dict
@return: a dictionary with the same keys as the input dict, and
values representing the result of the checks
port = netutils.GetDaemonPort(constants.NODED)
vm_capable = my_name not in what.get(constants.NV_VMNODES, [])
- if constants.NV_HYPERVISOR in what and vm_capable:
- result[constants.NV_HYPERVISOR] = tmp = {}
- for hv_name in what[constants.NV_HYPERVISOR]:
- try:
- val = hypervisor.GetHypervisor(hv_name).Verify()
- except errors.HypervisorError, err:
- val = "Error while checking hypervisor: %s" % str(err)
- tmp[hv_name] = val
-
- if constants.NV_HVPARAMS in what and vm_capable:
- result[constants.NV_HVPARAMS] = tmp = []
- for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
- try:
- logging.info("Validating hv %s, %s", hv_name, hvparms)
- hypervisor.GetHypervisor(hv_name).ValidateParameters(hvparms)
- except errors.HypervisorError, err:
- tmp.append((source, hv_name, str(err)))
+ _VerifyHypervisors(what, vm_capable, result, all_hvparams)
+ _VerifyHvparams(what, vm_capable, result)
if constants.NV_FILELIST in what:
fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
val = str(err)
result[constants.NV_LVLIST] = val
- if constants.NV_INSTANCELIST in what and vm_capable:
- # GetInstanceList can fail
- try:
- val = GetInstanceList(what[constants.NV_INSTANCELIST])
- except RPCFail, err:
- val = str(err)
- result[constants.NV_INSTANCELIST] = val
+ _VerifyInstanceList(what, vm_capable, result, all_hvparams)
if constants.NV_VGLIST in what and vm_capable:
result[constants.NV_VGLIST] = utils.ListVolumeGroups()
if constants.NV_PVLIST in what and vm_capable:
+ check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
- filter_allocatable=False)
+ filter_allocatable=False,
+ include_lvs=check_exclusive_pvs)
+ if check_exclusive_pvs:
+ result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
+ for pvi in val:
+ # Avoid sending useless data on the wire
+ pvi.lv_list = []
result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)
if constants.NV_VERSION in what:
result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
constants.RELEASE_VERSION)
- if constants.NV_HVINFO in what and vm_capable:
- hyper = hypervisor.GetHypervisor(what[constants.NV_HVINFO])
- result[constants.NV_HVINFO] = hyper.GetNodeInfo()
+ _VerifyNodeInfo(what, vm_capable, result, all_hvparams)
+
+ if constants.NV_DRBDVERSION in what and vm_capable:
+ try:
+ drbd_version = DRBD8.GetProcInfo().GetVersionString()
+ except errors.BlockDeviceError, err:
+ logging.warning("Can't get DRBD version", exc_info=True)
+ drbd_version = str(err)
+ result[constants.NV_DRBDVERSION] = drbd_version
if constants.NV_DRBDLIST in what and vm_capable:
try:
- used_minors = bdev.DRBD8.GetUsedDevs().keys()
+ used_minors = drbd.DRBD8.GetUsedDevs()
except errors.BlockDeviceError, err:
logging.warning("Can't get used minors list", exc_info=True)
used_minors = str(err)
if constants.NV_DRBDHELPER in what and vm_capable:
status = True
try:
- payload = bdev.BaseDRBD.GetUsermodeHelper()
+ payload = drbd.DRBD8.GetUsermodeHelper()
except errors.BlockDeviceError, err:
logging.error("Can't get DRBD usermode helper: %s", str(err))
status = False
_Fail("Missing bridges %s", utils.CommaJoin(missing))
-def GetInstanceList(hypervisor_list):
+def GetInstanceListForHypervisor(hname, hvparams=None,
+                                 get_hv_fn=hypervisor.GetHypervisor):
+  """Provides a list of instances of the given hypervisor.
+
+  @type hname: string
+  @param hname: name of the hypervisor
+  @type hvparams: dict of strings
+  @param hvparams: hypervisor parameters for the given hypervisor
+  @type get_hv_fn: function
+  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
+    name; optional parameter to increase testability
+
+  @rtype: list
+  @return: a list of all running instances on the current node, e.g.:
+    - instance1.example.com
+    - instance2.example.com
+
+  """
+  results = []
+  try:
+    hv = get_hv_fn(hname)
+    names = hv.ListInstances(hvparams=hvparams)
+    results.extend(names)
+  except errors.HypervisorError, err:
+    # _Fail raises RPCFail, so no (partial) list is returned on error
+    _Fail("Error enumerating instances (hypervisor %s): %s",
+          hname, err, exc=True)
+  return results
+
+
+def GetInstanceList(hypervisor_list, all_hvparams=None,
+                    get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances.
  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query information
+  @type all_hvparams: dict of dict of strings
+  @param all_hvparams: a dictionary mapping hypervisor types to respective
+    cluster-wide hypervisor parameters
+  @type get_hv_fn: function
+  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
+    name; optional parameter to increase testability
  @rtype: list
  @return: a list of all running instances on the current node
  """
  results = []
  for hname in hypervisor_list:
-    try:
-      names = hypervisor.GetHypervisor(hname).ListInstances()
-      results.extend(names)
-    except errors.HypervisorError, err:
-      _Fail("Error enumerating instances (hypervisor %s): %s",
-            hname, err, exc=True)
-
+    # NOTE(review): all_hvparams defaults to None but is subscripted here
+    # unconditionally -- calling without it raises TypeError; confirm all
+    # callers supply the parameter.
+    hvparams = all_hvparams[hname]
+    results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
+                                                get_hv_fn=get_hv_fn))
  return results
def GetInstanceMigratable(instance):
- """Gives whether an instance can be migrated.
+ """Computes whether an instance can be migrated.
@type instance: L{objects.Instance}
@param instance: object representing the instance to be checked.
"""
hyper = hypervisor.GetHypervisor(instance.hypervisor)
iname = instance.name
- if iname not in hyper.ListInstances():
+ if iname not in hyper.ListInstances(instance.hvparams):
_Fail("Instance %s is not running", iname)
for idx in range(len(instance.disks)):
" log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
-def _GetBlockDevSymlinkPath(instance_name, idx):
- return utils.PathJoin(pathutils.DISK_LINKS_DIR, "%s%s%d" %
- (instance_name, constants.DISK_SEPARATOR, idx))
+def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
+ """Returns symlink path for block device.
+
+ """
+ if _dir is None:
+ _dir = pathutils.DISK_LINKS_DIR
+
+ return utils.PathJoin(_dir,
+ ("%s%s%s" %
+ (instance_name, constants.DISK_SEPARATOR, idx)))
def _SymlinkBlockDev(instance_name, device_path, idx):
return block_devices
-def StartInstance(instance, startup_paused):
+def StartInstance(instance, startup_paused, reason, store_reason=True):
"""Start an instance.
@type instance: L{objects.Instance}
@param instance: the instance object
@type startup_paused: bool
@param instance: pause instance at startup?
+ @type reason: list of reasons
+ @param reason: the reason trail for this startup
+ @type store_reason: boolean
+ @param store_reason: whether to store the shutdown reason trail on file
@rtype: None
"""
- running_instances = GetInstanceList([instance.hypervisor])
+ running_instances = GetInstanceListForHypervisor(instance.hypervisor,
+ instance.hvparams)
if instance.name in running_instances:
logging.info("Instance %s already running, not starting", instance.name)
block_devices = _GatherAndLinkBlockDevs(instance)
hyper = hypervisor.GetHypervisor(instance.hypervisor)
hyper.StartInstance(instance, block_devices, startup_paused)
+ if store_reason:
+ _StoreInstReasonTrail(instance.name, reason)
except errors.BlockDeviceError, err:
_Fail("Block device error: %s", err, exc=True)
except errors.HypervisorError, err:
_Fail("Hypervisor error: %s", err, exc=True)
-def InstanceShutdown(instance, timeout):
+def InstanceShutdown(instance, timeout, reason, store_reason=True):
"""Shut an instance down.
@note: this functions uses polling with a hardcoded timeout.
@param instance: the instance object
@type timeout: integer
@param timeout: maximum timeout for soft shutdown
+ @type reason: list of reasons
+ @param reason: the reason trail for this shutdown
+ @type store_reason: boolean
+ @param store_reason: whether to store the shutdown reason trail on file
@rtype: None
"""
hyper = hypervisor.GetHypervisor(hv_name)
iname = instance.name
- if instance.name not in hyper.ListInstances():
+ if instance.name not in hyper.ListInstances(instance.hvparams):
logging.info("Instance %s not running, doing nothing", iname)
return
self.tried_once = False
def __call__(self):
- if iname not in hyper.ListInstances():
+ if iname not in hyper.ListInstances(instance.hvparams):
return
try:
hyper.StopInstance(instance, retry=self.tried_once)
+ if store_reason:
+ _StoreInstReasonTrail(instance.name, reason)
except errors.HypervisorError, err:
- if iname not in hyper.ListInstances():
+ if iname not in hyper.ListInstances(instance.hvparams):
# if the instance is no longer existing, consider this a
# success and go to cleanup
return
try:
hyper.StopInstance(instance, force=True)
except errors.HypervisorError, err:
- if iname in hyper.ListInstances():
+ if iname in hyper.ListInstances(instance.hvparams):
# only raise an error if the instance still exists, otherwise
# the error could simply be "instance ... unknown"!
_Fail("Failed to force stop instance %s: %s", iname, err)
time.sleep(1)
- if iname in hyper.ListInstances():
+ if iname in hyper.ListInstances(instance.hvparams):
_Fail("Could not shutdown instance %s even by destroy", iname)
try:
_RemoveBlockDevLinks(iname, instance.disks)
-def InstanceReboot(instance, reboot_type, shutdown_timeout):
+def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
"""Reboot an instance.
@type instance: L{objects.Instance}
instance (instead of a call_instance_reboot RPC)
@type shutdown_timeout: integer
@param shutdown_timeout: maximum timeout for soft shutdown
+ @type reason: list of reasons
+ @param reason: the reason trail for this reboot
@rtype: None
"""
- running_instances = GetInstanceList([instance.hypervisor])
+ running_instances = GetInstanceListForHypervisor(instance.hypervisor,
+ instance.hvparams)
if instance.name not in running_instances:
_Fail("Cannot reboot instance %s that is not running", instance.name)
_Fail("Failed to soft reboot instance %s: %s", instance.name, err)
elif reboot_type == constants.INSTANCE_REBOOT_HARD:
try:
- InstanceShutdown(instance, shutdown_timeout)
- return StartInstance(instance, False)
+ InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
+ result = StartInstance(instance, False, reason, store_reason=False)
+ _StoreInstReasonTrail(instance.name, reason)
+ return result
except errors.HypervisorError, err:
_Fail("Failed to hard reboot instance %s: %s", instance.name, err)
else:
"""
hyper = hypervisor.GetHypervisor(instance.hypervisor)
- running = hyper.ListInstances()
+ running = hyper.ListInstances(instance.hvparams)
if instance.name not in running:
logging.info("Instance %s is not running, cannot balloon", instance.name)
return
"""
try:
result = _RecursiveAssembleBD(disk, owner, as_primary)
- if isinstance(result, bdev.BlockDev):
+ if isinstance(result, BlockDev):
# pylint: disable=E1103
result = result.dev_path
if as_primary:
return rbd.GetSyncStatus()
-def BlockdevGetsize(disks):
+def BlockdevGetdimensions(disks):
"""Computes the size of the given disks.
If a disk is not found, returns None instead.
@param disks: the list of disk to compute the size for
@rtype: list
@return: list with elements None if the disk cannot be found,
- otherwise the size
+ otherwise the pair (size, spindles), where spindles is None if the
+ device doesn't support that
"""
result = []
if rbd is None:
result.append(None)
else:
- result.append(rbd.GetActualSize())
+ result.append(rbd.GetActualDimensions())
return result
result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
if nic.nicparams[constants.NIC_LINK]:
result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
- if nic.network:
- result["NIC_%d_NETWORK" % idx] = nic.network
+ if nic.netinfo:
+ nobj = objects.Network.FromDict(nic.netinfo)
+ result.update(nobj.HooksDict("NIC_%d_" % idx))
if constants.HV_NIC_TYPE in instance.hvparams:
result["NIC_%d_FRONTEND_TYPE" % idx] = \
instance.hvparams[constants.HV_NIC_TYPE]
return result
+def DiagnoseExtStorage(top_dirs=None):
+  """Compute the validity for all ExtStorage Providers.
+
+  @type top_dirs: list
+  @param top_dirs: the list of directories in which to
+      search (if not given defaults to
+      L{pathutils.ES_SEARCH_PATH})
+  @rtype: list of L{objects.ExtStorage}
+  @return: a list of tuples (name, path, status, diagnose, parameters)
+      for all (potential) ExtStorage Providers under all
+      search paths, where:
+          - name is the (potential) ExtStorage Provider
+          - path is the full path to the ExtStorage Provider
+          - status True/False is the validity of the ExtStorage Provider
+          - diagnose is the error message for an invalid ExtStorage Provider,
+            otherwise empty
+          - parameters is a list of (name, help) parameters, if any
+
+  """
+  if top_dirs is None:
+    top_dirs = pathutils.ES_SEARCH_PATH
+
+  result = []
+  for dir_name in top_dirs:
+    if os.path.isdir(dir_name):
+      try:
+        f_names = utils.ListVisibleFiles(dir_name)
+      except EnvironmentError, err:
+        logging.exception("Can't list the ExtStorage directory %s: %s",
+                          dir_name, err)
+        # NOTE(review): 'break' aborts the scan over ALL remaining search
+        # directories after a single listing error -- confirm 'continue'
+        # was not intended here.
+        break
+      for name in f_names:
+        es_path = utils.PathJoin(dir_name, name)
+        status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
+        if status:
+          diagnose = ""
+          parameters = es_inst.supported_parameters
+        else:
+          # on failure, es_inst carries the error message instead of an object
+          diagnose = es_inst
+          parameters = []
+        result.append((name, es_path, status, diagnose, parameters))
+
+  return result
+
+
def BlockdevGrow(disk, amount, dryrun, backingstore):
"""Grow a stack of block devices.
# Write and replace the file atomically
utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
- gid=getents.masterd_gid)
+ gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)
def JobQueueRename(old, new):
getents = runtime.GetEnts()
- utils.RenameFile(old, new, mkdir=True, mkdir_mode=0700,
- dir_uid=getents.masterd_uid, dir_gid=getents.masterd_gid)
+ utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
+ dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)
def BlockdevClose(instance_name, disks):
"""
try:
- return bdev.BaseDRBD.GetUsermodeHelper()
+ return drbd.DRBD8.GetUsermodeHelper()
except errors.BlockDeviceError, err:
_Fail(str(err))
def _VerifyRestrictedCmdName(cmd):
- """Verifies a remote command name.
+ """Verifies a restricted command name.
@type cmd: string
@param cmd: Command name
def _CommonRestrictedCmdCheck(path, owner):
- """Common checks for remote command file system directories and files.
+ """Common checks for restricted command file system directories and files.
@type path: string
@param path: Path to check
def _VerifyRestrictedCmdDirectory(path, _owner=None):
- """Verifies remote command directory.
+ """Verifies restricted command directory.
@type path: string
@param path: Path to check
def _VerifyRestrictedCmd(path, cmd, _owner=None):
- """Verifies a whole remote command and returns its executable filename.
+ """Verifies a whole restricted command and returns its executable filename.
@type path: string
- @param path: Directory containing remote commands
+ @param path: Directory containing restricted commands
@type cmd: string
@param cmd: Command name
@rtype: tuple; (boolean, string)
_verify_dir=_VerifyRestrictedCmdDirectory,
_verify_name=_VerifyRestrictedCmdName,
_verify_cmd=_VerifyRestrictedCmd):
- """Performs a number of tests on a remote command.
+ """Performs a number of tests on a restricted command.
@type path: string
- @param path: Directory containing remote commands
+ @param path: Directory containing restricted commands
@type cmd: string
@param cmd: Command name
@return: Same as L{_VerifyRestrictedCmd}
_prepare_fn=_PrepareRestrictedCmd,
_runcmd_fn=utils.RunCmd,
_enabled=constants.ENABLE_RESTRICTED_COMMANDS):
- """Executes a remote command after performing strict tests.
+ """Executes a restricted command after performing strict tests.
@type cmd: string
@param cmd: Command name
@raise RPCFail: In case of an error
"""
- logging.info("Preparing to run remote command '%s'", cmd)
+ logging.info("Preparing to run restricted command '%s'", cmd)
if not _enabled:
- _Fail("Remote commands disabled at configure time")
+ _Fail("Restricted commands disabled at configure time")
lock = None
try:
# Do not include original error message in returned error
_Fail("Executing command '%s' failed" % cmd)
elif cmdresult.failed or cmdresult.fail_reason:
- _Fail("Remote command '%s' failed: %s; output: %s",
+ _Fail("Restricted command '%s' failed: %s; output: %s",
cmd, cmdresult.fail_reason, cmdresult.output)
else:
return cmdresult.output