from ganeti import query
from ganeti import utils
-from ganeti.cmdlib.base import _QueryBase, NoHooksLU, LogicalUnit
-from ganeti.cmdlib.common import _GetWantedNodes, _ShareAll, \
- _CheckNodeOnline, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \
- _ShutdownInstanceDisks
-from ganeti.cmdlib.instance_utils import _GetClusterDomainSecret, \
- _BuildInstanceHookEnvByObject, _CheckNodeNotDrained, _RemoveInstance
+from ganeti.cmdlib.base import QueryBase, NoHooksLU, LogicalUnit
+from ganeti.cmdlib.common import GetWantedNodes, ShareAll, CheckNodeOnline, \
+ ExpandNodeName
+from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
+ ShutdownInstanceDisks
+from ganeti.cmdlib.instance_utils import GetClusterDomainSecret, \
+ BuildInstanceHookEnvByObject, CheckNodeNotDrained, RemoveInstance
-class _ExportQuery(_QueryBase):
+class ExportQuery(QueryBase):
FIELDS = query.EXPORT_FIELDS
#: The node name is not a unique key for this query
# The following variables interact with QueryBase._GetNames
if self.names:
- self.wanted = _GetWantedNodes(lu, self.names)
+ self.wanted = GetWantedNodes(lu, self.names)
else:
self.wanted = locking.ALL_SET
self.do_locking = self.use_locking
if self.do_locking:
- lu.share_locks = _ShareAll()
+ lu.share_locks = ShareAll()
lu.needed_locks = {
locking.LEVEL_NODE: self.wanted,
}
REQ_BGL = False
def CheckArguments(self):
- self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
- ["node", "export"], self.op.use_locking)
+ self.expq = ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
+ ["node", "export"], self.op.use_locking)
def ExpandNames(self):
self.expq.ExpandNames(self)
self.instance = self.cfg.GetInstanceInfo(instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, self.instance.primary_node)
+ CheckNodeOnline(self, self.instance.primary_node)
- self._cds = _GetClusterDomainSecret()
+ self._cds = GetClusterDomainSecret()
def Exec(self, feedback_fn):
"""Prepares an instance for an export.
"REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
self.instance = self.cfg.GetInstanceInfo(instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, self.instance.primary_node)
+ CheckNodeOnline(self, self.instance.primary_node)
if (self.op.remove_instance and
self.instance.admin_state == constants.ADMINST_UP and
" down before", errors.ECODE_STATE)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
- self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+ self.op.target_node = ExpandNodeName(self.cfg, self.op.target_node)
self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
assert self.dst_node is not None
- _CheckNodeOnline(self, self.dst_node.name)
- _CheckNodeNotDrained(self, self.dst_node.name)
+ CheckNodeOnline(self, self.dst_node.name)
+ CheckNodeNotDrained(self, self.dst_node.name)
self._cds = None
self.dest_disk_info = None
len(self.instance.disks)),
errors.ECODE_INVAL)
- cds = _GetClusterDomainSecret()
+ cds = GetClusterDomainSecret()
# Check X509 key name
try:
if activate_disks:
# Activate the instance disks if we're exporting a stopped instance
feedback_fn("Activating disks for %s" % instance.name)
- _StartInstanceDisks(self, instance, None)
+ StartInstanceDisks(self, instance, None)
try:
helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
msg = result.fail_msg
if msg:
feedback_fn("Failed to start instance: %s" % msg)
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Could not start instance: %s" % msg)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
finally:
if activate_disks:
feedback_fn("Deactivating disks for %s" % instance.name)
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
if not (compat.all(dresults) and fin_resu):
failures = []
# Remove instance if requested
if self.op.remove_instance:
feedback_fn("Removing instance %s" % instance.name)
- _RemoveInstance(self, feedback_fn, instance,
- self.op.ignore_remove_failures)
+ RemoveInstance(self, feedback_fn, instance,
+ self.op.ignore_remove_failures)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
self._CleanupExports(feedback_fn)
from ganeti import locking
from ganeti import query
from ganeti import utils
-from ganeti.cmdlib.common import _ExpandInstanceName
+from ganeti.cmdlib.common import ExpandInstanceName
class ResultWithJobs:
else:
assert locking.LEVEL_INSTANCE not in self.needed_locks, \
"_ExpandAndLockInstance called with instance-level locks set"
- self.op.instance_name = _ExpandInstanceName(self.cfg,
- self.op.instance_name)
+ self.op.instance_name = ExpandInstanceName(self.cfg,
+ self.op.instance_name)
self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
def _LockInstancesNodes(self, primary_only=False,
raise NotImplementedError
-class _QueryBase:
+class QueryBase:
"""Base for query utility classes.
"""
from ganeti import utils
from ganeti import vcluster
-from ganeti.cmdlib.base import NoHooksLU, _QueryBase, LogicalUnit, \
+from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
ResultWithJobs
-from ganeti.cmdlib.common import _ShareAll, _RunPostHook, \
- _ComputeAncillaryFiles, _RedistributeAncillaryFiles, _UploadHelper, \
- _GetWantedInstances, _MergeAndVerifyHvState, _MergeAndVerifyDiskState, \
- _GetUpdatedIPolicy, _ComputeNewInstanceViolations, _GetUpdatedParams, \
- _CheckOSParams, _CheckHVParams, _AdjustCandidatePool, _CheckNodePVs, \
- _ComputeIPolicyInstanceViolation, _AnnotateDiskParams, \
- _SupportsOob
+from ganeti.cmdlib.common import ShareAll, RunPostHook, \
+ ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
+ GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
+ GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
+ CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
+ ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
import ganeti.masterd.instance
REQ_BGL = False
def CheckArguments(self):
- self.cq = _ClusterQuery(None, self.op.output_fields, False)
+ self.cq = ClusterQuery(None, self.op.output_fields, False)
def ExpandNames(self):
self.cq.ExpandNames(self)
master_params = self.cfg.GetMasterNetworkParameters()
# Run post hooks on master node before it's removed
- _RunPostHook(self, master_params.name)
+ RunPostHook(self, master_params.name)
ems = self.cfg.GetUseExternalMipScript()
result = self.rpc.call_node_deactivate_master_ip(master_params.name,
return True
-class _ClusterQuery(_QueryBase):
+class ClusterQuery(QueryBase):
FIELDS = query.CLUSTER_FIELDS
#: Do not sort (there is only one item)
locking.LEVEL_NODE: locking.ALL_SET,
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
def Exec(self, feedback_fn):
"""Redistribute the configuration.
"""
self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
- _RedistributeAncillaryFiles(self)
+ RedistributeAncillaryFiles(self)
class LUClusterRename(LogicalUnit):
node_list.remove(master_params.name)
except ValueError:
pass
- _UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
+ UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
finally:
master_params.ip = new_ip
result = self.rpc.call_node_activate_master_ip(master_params.name,
def ExpandNames(self):
if self.op.instances:
- self.wanted_names = _GetWantedInstances(self, self.op.instances)
+ self.wanted_names = GetWantedInstances(self, self.op.instances)
# Not getting the node allocation lock as only a specific set of
# instances (and their nodes) is going to be acquired
self.needed_locks = {
locking.LEVEL_NODEGROUP: locking.ALL_SET,
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
def BuildHooksEnv(self):
"""Build hooks env.
constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
if self.op.hv_state:
- new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
- self.cluster.hv_state_static)
+ new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+ self.cluster.hv_state_static)
self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
for hv, values in new_hv_state.items())
if self.op.disk_state:
- new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
- self.cluster.disk_state_static)
+ new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
+ self.cluster.disk_state_static)
self.new_disk_state = \
dict((storage, dict((name, cluster.SimpleFillDiskState(values))
for name, values in svalues.items()))
for storage, svalues in new_disk_state.items())
if self.op.ipolicy:
- self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
- group_policy=False)
+ self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+ group_policy=False)
all_instances = self.cfg.GetAllInstancesInfo().values()
violations = set()
for node in inst.all_nodes)])
new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
- new = _ComputeNewInstanceViolations(ipol,
- new_ipolicy, instances, self.cfg)
+ new = ComputeNewInstanceViolations(ipol,
+ new_ipolicy, instances, self.cfg)
if new:
violations.update(new)
if os_name not in self.new_osp:
self.new_osp[os_name] = {}
- self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
- use_none=True)
+ self.new_osp[os_name] = GetUpdatedParams(self.new_osp[os_name], osp,
+ use_none=True)
if not self.new_osp[os_name]:
# we removed all parameters
del self.new_osp[os_name]
else:
# check the parameter validity (remote check)
- _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
- os_name, self.new_osp[os_name])
+ CheckOSParams(self, False, [self.cfg.GetMasterNode()],
+ os_name, self.new_osp[os_name])
# changes to the hypervisor list
if self.op.enabled_hypervisors is not None:
hv_class = hypervisor.GetHypervisorClass(hv_name)
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class.CheckParameterSyntax(hv_params)
- _CheckHVParams(self, node_list, hv_name, hv_params)
+ CheckHVParams(self, node_list, hv_name, hv_params)
self._CheckDiskTemplateConsistency()
new_osp = objects.FillDict(cluster_defaults, hv_params)
hv_class = hypervisor.GetHypervisorClass(hv_name)
hv_class.CheckParameterSyntax(new_osp)
- _CheckHVParams(self, node_list, hv_name, new_osp)
+ CheckHVParams(self, node_list, hv_name, new_osp)
if self.op.default_iallocator:
alloc_script = utils.FindFile(self.op.default_iallocator,
if self.op.candidate_pool_size is not None:
self.cluster.candidate_pool_size = self.op.candidate_pool_size
# we need to update the pool size here, otherwise the save will fail
- _AdjustCandidatePool(self, [])
+ AdjustCandidatePool(self, [])
if self.op.maintain_node_health is not None:
if self.op.maintain_node_health and not constants.ENABLE_CONFD:
def ExpandNames(self):
self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
def CheckPrereq(self):
"""Check prerequisites.
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
_ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
# Check PVs
- (errmsgs, pvminmax) = _CheckNodePVs(nresult, self._exclusive_storage)
+ (errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
for em in errmsgs:
self._Error(constants.CV_ENODELVM, node, em)
if pvminmax is not None:
cluster = self.cfg.GetClusterInfo()
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
self.group_info)
- err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
+ err = ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
_ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
code=self.ETYPE_WARNING)
# AnnotateDiskParams already makes copies of the disks
devonly = []
for (inst, dev) in disks:
- (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
+ (anno_disk,) = AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
self.cfg.SetDiskID(anno_disk, nname)
devonly.append(anno_disk)
# FIXME: verify OS list
# File verification
- filemap = _ComputeAncillaryFiles(cluster, False)
+ filemap = ComputeAncillaryFiles(cluster, False)
# do local checksums
master_node = self.master_node = self.cfg.GetMasterNode()
# Gather OOB paths
oob_paths = []
for node in self.all_node_info.values():
- path = _SupportsOob(self.cfg, node)
+ path = SupportsOob(self.cfg, node)
if path and path not in oob_paths:
oob_paths.append(path)
REQ_BGL = False
def ExpandNames(self):
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_NODEGROUP: locking.ALL_SET,
}
return full_name
-def _ExpandInstanceName(cfg, name):
+def ExpandInstanceName(cfg, name):
"""Wrapper over L{_ExpandItemName} for instance."""
return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
-def _ExpandNodeName(cfg, name):
+def ExpandNodeName(cfg, name):
"""Wrapper over L{_ExpandItemName} for nodes."""
return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
-def _ShareAll():
+def ShareAll():
"""Returns a dict declaring all lock levels shared.
"""
return dict.fromkeys(locking.LEVELS, 1)
-def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
+def CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
"""Checks if the instances in a node group are still correct.
@type cfg: L{config.ConfigWriter}
return wanted_instances
-def _GetWantedNodes(lu, nodes):
+def GetWantedNodes(lu, nodes):
"""Returns list of checked and expanded node names.
@type lu: L{LogicalUnit}
"""
if nodes:
- return [_ExpandNodeName(lu.cfg, name) for name in nodes]
+ return [ExpandNodeName(lu.cfg, name) for name in nodes]
return utils.NiceSort(lu.cfg.GetNodeList())
-def _GetWantedInstances(lu, instances):
+def GetWantedInstances(lu, instances):
"""Returns list of checked and expanded instance names.
@type lu: L{LogicalUnit}
"""
if instances:
- wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
+ wanted = [ExpandInstanceName(lu.cfg, name) for name in instances]
else:
wanted = utils.NiceSort(lu.cfg.GetInstanceList())
return wanted
-def _RunPostHook(lu, node_name):
+def RunPostHook(lu, node_name):
"""Runs the post-hook for an opcode on a single node.
"""
node_name, err)
-def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
+def RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
"""Distribute additional files which are part of the cluster configuration.
ConfigWriter takes care of distributing the config and ssconf files, but
# Gather file lists
(files_all, _, files_mc, files_vm) = \
- _ComputeAncillaryFiles(cluster, True)
+ ComputeAncillaryFiles(cluster, True)
# Never re-distribute configuration file from here
assert not (pathutils.CLUSTER_CONF_FILE in files_all or
# Upload the files
for (node_list, files) in filemap:
for fname in files:
- _UploadHelper(lu, node_list, fname)
+ UploadHelper(lu, node_list, fname)
-def _ComputeAncillaryFiles(cluster, redist):
+def ComputeAncillaryFiles(cluster, redist):
"""Compute files external to Ganeti which need to be consistent.
@type redist: boolean
return (files_all, files_opt, files_mc, files_vm)
-def _UploadHelper(lu, nodes, fname):
+def UploadHelper(lu, nodes, fname):
"""Helper for uploading a file and showing warnings.
"""
lu.LogWarning(msg)
-def _MergeAndVerifyHvState(op_input, obj_input):
+def MergeAndVerifyHvState(op_input, obj_input):
"""Combines the hv state from an opcode with the one of the object
@param op_input: The input dict from the opcode
return None
-def _MergeAndVerifyDiskState(op_input, obj_input):
+def MergeAndVerifyDiskState(op_input, obj_input):
"""Combines the disk state from an opcode with the one of the object
@param op_input: The input dict from the opcode
return None
-def _CheckOSParams(lu, required, nodenames, osname, osparams):
+def CheckOSParams(lu, required, nodenames, osname, osparams):
"""OS parameters validation.
@type lu: L{LogicalUnit}
osname, node)
-def _CheckHVParams(lu, nodenames, hvname, hvparams):
+def CheckHVParams(lu, nodenames, hvname, hvparams):
"""Hypervisor parameter validation.
This function abstracts the hypervisor parameter validation to be
info.Raise("Hypervisor parameter validation failed on node %s" % node)
-def _AdjustCandidatePool(lu, exceptions):
+def AdjustCandidatePool(lu, exceptions):
"""Adjust the candidate pool after node operations.
"""
(mc_now, mc_max))
-def _CheckNodePVs(nresult, exclusive_storage):
+def CheckNodePVs(nresult, exclusive_storage):
"""Check node PVs.
"""
return None
-def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
- nic_count, disk_sizes, spindle_use,
- disk_template,
- _compute_fn=_ComputeMinMaxSpec):
+def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
+ nic_count, disk_sizes, spindle_use,
+ disk_template,
+ _compute_fn=_ComputeMinMaxSpec):
"""Verifies ipolicy against provided specs.
@type ipolicy: dict
return ret + min_errs
-def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
- _compute_fn=_ComputeIPolicySpecViolation):
+def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
+ _compute_fn=ComputeIPolicySpecViolation):
"""Compute if instance meets the specs of ipolicy.
@type ipolicy: dict
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@param _compute_fn: The function to verify ipolicy (unittest only)
- @see: L{_ComputeIPolicySpecViolation}
+ @see: L{ComputeIPolicySpecViolation}
"""
be_full = cfg.GetClusterInfo().FillBE(instance)
"""
return frozenset([inst.name for inst in instances
- if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
+ if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
-def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
+def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
"""Computes a set of any instances that would violate the new ipolicy.
@param old_ipolicy: The current (still in-place) ipolicy
_ComputeViolatingInstances(old_ipolicy, instances, cfg))
-def _GetUpdatedParams(old_params, update_dict,
+def GetUpdatedParams(old_params, update_dict,
use_default=True, use_none=False):
"""Return the new version of a parameter dictionary.
return params_copy
-def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
+def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
"""Return the new version of an instance policy.
@param group_policy: whether this policy applies to a group and thus
if group_policy:
msg = "%s cannot appear in group instance specs" % key
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
- ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
- use_none=False, use_default=False)
+ ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
+ use_none=False, use_default=False)
utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
else:
# FIXME: we assume all others are lists; this should be redone
return ipolicy
-def _AnnotateDiskParams(instance, devs, cfg):
+def AnnotateDiskParams(instance, devs, cfg):
"""Little helper wrapper to the rpc annotation method.
@param instance: The instance object
cfg.GetInstanceDiskParams(instance))
-def _SupportsOob(cfg, node):
+def SupportsOob(cfg, node):
"""Tells if node supports OOB.
@type cfg: L{config.ConfigWriter}
"""
def fn(old, value):
- new = _GetUpdatedParams(old, value)
+ new = GetUpdatedParams(old, value)
utils.ForceDictType(new, type_check)
return new
return [name for name in nodenames if name not in vm_nodes]
-def _GetDefaultIAllocator(cfg, ialloc):
+def GetDefaultIAllocator(cfg, ialloc):
"""Decides on which iallocator to use.
@type cfg: L{config.ConfigWriter}
return ialloc
-def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
- cur_group_uuid):
+def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
+ cur_group_uuid):
"""Checks if node groups for locked instances are still correct.
@type cfg: L{config.ConfigWriter}
assert owned_nodes.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % name
- inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
+ inst_groups = CheckInstanceNodeGroups(cfg, name, owned_groups)
assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
"Instance %s has no node in group %s" % (name, cur_group_uuid)
-def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
- primary_only=False):
+def CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
+ primary_only=False):
"""Checks if the owned node groups are still correct for an instance.
@type cfg: L{config.ConfigWriter}
return inst_groups
-def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
+def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
"""Unpacks the result of change-group and node-evacuate iallocator requests.
Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
return op
-def _MapInstanceDisksToNodes(instances):
+def MapInstanceDisksToNodes(instances):
"""Creates a map from (node, volume) to instance name.
@type instances: list of L{objects.Instance}
for vol in vols)
-def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
+def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
"""Make sure that none of the given paramters is global.
If a global parameter is found, an L{errors.OpPrereqError} exception is
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-def _IsExclusiveStorageEnabledNode(cfg, node):
+def IsExclusiveStorageEnabledNode(cfg, node):
"""Whether exclusive_storage is in effect for the given node.
@type cfg: L{config.ConfigWriter}
return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
-def _CheckInstanceState(lu, instance, req_states, msg=None):
+def CheckInstanceState(lu, instance, req_states, msg=None):
"""Ensure that an instance is in one of the required states.
@param lu: the LU on behalf of which we make the check
" is down")
-def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
+def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
"""Check the sanity of iallocator and node arguments and use the
cluster-wide iallocator if appropriate.
" iallocator", errors.ECODE_INVAL)
-def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
+def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
faulty = []
for dev in instance.disks:
return faulty
-def _CheckNodeOnline(lu, node, msg=None):
+def CheckNodeOnline(lu, node, msg=None):
"""Ensure that a given node is online.
@param lu: the LU on behalf of which we make the check
from ganeti import query
from ganeti import utils
from ganeti.masterd import iallocator
-from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase, \
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
ResultWithJobs
-from ganeti.cmdlib.common import _MergeAndVerifyHvState, \
- _MergeAndVerifyDiskState, _GetWantedNodes, _GetUpdatedParams, \
- _CheckNodeGroupInstances, _GetUpdatedIPolicy, \
- _ComputeNewInstanceViolations, _GetDefaultIAllocator, _ShareAll, \
- _CheckInstancesNodeGroups, _LoadNodeEvacResult, _MapInstanceDisksToNodes
+from ganeti.cmdlib.common import MergeAndVerifyHvState, \
+ MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
+ CheckNodeGroupInstances, GetUpdatedIPolicy, \
+ ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
+ CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceDisksToNodes
import ganeti.masterd.instance
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
if self.op.hv_state:
- self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+ self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
else:
self.new_hv_state = None
if self.op.disk_state:
- self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+ self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
else:
self.new_disk_state = None
def ExpandNames(self):
# These raise errors.OpPrereqError on their own:
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
- self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+ self.op.nodes = GetWantedNodes(self, self.op.nodes)
# We want to lock all the affected nodes and groups. We have readily
# available the list of nodes, and the *destination* group. To gather the
list(previously_split_instances & all_split_instances))
-class _GroupQuery(_QueryBase):
+class GroupQuery(QueryBase):
FIELDS = query.GROUP_FIELDS
def ExpandNames(self, lu):
REQ_BGL = False
def CheckArguments(self):
- self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
self.op.output_fields, False)
def ExpandNames(self):
"""Updates and verifies disk parameters.
"""
- new_params = _GetUpdatedParams(old, new)
+ new_params = GetUpdatedParams(old, new)
utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
return new_params
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
# Check if locked instances are still correct
- _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+ CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
self.group = self.cfg.GetNodeGroup(self.group_uuid)
cluster = self.cfg.GetClusterInfo()
(self.op.group_name, self.group_uuid))
if self.op.ndparams:
- new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
+ new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
self.new_ndparams = new_ndparams
errors.ECODE_INVAL)
if self.op.hv_state:
- self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
- self.group.hv_state_static)
+ self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+ self.group.hv_state_static)
if self.op.disk_state:
self.new_disk_state = \
- _MergeAndVerifyDiskState(self.op.disk_state,
- self.group.disk_state_static)
+ MergeAndVerifyDiskState(self.op.disk_state,
+ self.group.disk_state_static)
if self.op.ipolicy:
- self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
- self.op.ipolicy,
- group_policy=True)
+ self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
+ self.op.ipolicy,
+ group_policy=True)
new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
inst_filter = lambda inst: inst.name in owned_instances
instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
gmi = ganeti.masterd.instance
violations = \
- _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
- self.group),
- new_ipolicy, instances, self.cfg)
+ ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
+ self.group),
+ new_ipolicy, instances, self.cfg)
if violations:
self.LogWarning("After the ipolicy change the following instances"
utils.CommaJoin(self.req_target_uuids)),
errors.ECODE_INVAL)
- self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+ self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [],
assert self.group_uuid in owned_groups
# Check if locked instances are still correct
- _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+ CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
# Get instance information
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
# Check if node groups for locked instances are still correct
- _CheckInstancesNodeGroups(self.cfg, self.instances,
- owned_groups, owned_nodes, self.group_uuid)
+ CheckInstancesNodeGroups(self.cfg, self.instances,
+ owned_groups, owned_nodes, self.group_uuid)
if self.req_target_uuids:
# User requested specific target groups
(self.op.iallocator, ial.info),
errors.ECODE_NORES)
- jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+ jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
len(jobs), self.op.group_name)
# Raises errors.OpPrereqError on its own if group can't be found
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [],
assert self.group_uuid in owned_groups
# Check if locked instances are still correct
- _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+ CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
# Get instance information
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
# Check if node groups for locked instances are still correct
- _CheckInstancesNodeGroups(self.cfg, self.instances,
- owned_groups, owned_nodes, self.group_uuid)
+ CheckInstancesNodeGroups(self.cfg, self.instances,
+ owned_groups, owned_nodes, self.group_uuid)
def Exec(self, feedback_fn):
"""Verify integrity of cluster disks.
res_instances = set()
res_missing = {}
- nv_dict = _MapInstanceDisksToNodes(
+ nv_dict = MapInstanceDisksToNodes(
[inst for inst in self.instances.values()
if inst.admin_state == constants.ADMINST_UP])
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
from ganeti.cmdlib.common import INSTANCE_DOWN, \
- INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
- _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
- _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
- _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
- _AnnotateDiskParams, _GetUpdatedParams, _ExpandInstanceName, \
- _ComputeIPolicySpecViolation, _CheckInstanceState, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _CreateDisks, \
- _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
- _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
- _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
- _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
- _AssembleInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
- _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
- _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
- _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
- _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
- _CheckInstanceBridgesExist, _CheckNicsBridgesExist, _CheckNodeHasOS
+ INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
+ ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
+ LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
+ IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
+ AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
+ ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
+from ganeti.cmdlib.instance_storage import CreateDisks, \
+ CheckNodesFreeDiskPerVG, WipeDisks, WaitForSync, \
+ IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
+ CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
+ CreateBlockDev, StartInstanceDisks, ShutdownInstanceDisks, \
+ AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+ GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
+ NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
+ ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
+ GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
+ CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
import ganeti.masterd.instance
-#: Type description for changes as returned by L{ApplyContainerMods}'s
+#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
def _ComputeIPolicyInstanceSpecViolation(
ipolicy, instance_spec, disk_template,
- _compute_fn=_ComputeIPolicySpecViolation):
+ _compute_fn=ComputeIPolicySpecViolation):
"""Compute if instance specs meets the specs of ipolicy.
@type ipolicy: dict
@type disk_template: string
@param disk_template: the disk template of the instance
@param _compute_fn: The function to verify ipolicy (unittest only)
- @see: L{_ComputeIPolicySpecViolation}
+ @see: L{ComputeIPolicySpecViolation}
"""
mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
opcodes.RequireSharedFileStorage()
### Node/iallocator related checks
- _CheckIAllocatorOrNode(self, "iallocator", "pnode")
+ CheckIAllocatorOrNode(self, "iallocator", "pnode")
if self.op.pnode is not None:
if self.op.disk_template in constants.DTS_INT_MIRROR:
_CheckOpportunisticLocking(self.op)
- self._cds = _GetClusterDomainSecret()
+ self._cds = GetClusterDomainSecret()
if self.op.mode == constants.INSTANCE_IMPORT:
# On import force_variant must be True, because if we forced it at
self.opportunistic_locks[locking.LEVEL_NODE] = True
self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
else:
- self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+ self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
nodelist = [self.op.pnode]
if self.op.snode is not None:
- self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
+ self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
nodelist.append(self.op.snode)
self.needed_locks[locking.LEVEL_NODE] = nodelist
" requires a source node option",
errors.ECODE_INVAL)
else:
- self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
+ self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
self.needed_locks[locking.LEVEL_NODE].append(src_node)
if not os.path.isabs(src_path):
utils.PathJoin(pathutils.EXPORT_DIR, src_path)
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def _RunAllocator(self):
"""Run the allocator based on input opcode.
env["SRC_PATH"] = self.op.src_path
env["SRC_IMAGES"] = self.src_images
- env.update(_BuildInstanceHookEnv(
+ env.update(BuildInstanceHookEnv(
name=self.op.instance_name,
primary_node=self.op.pnode,
secondary_nodes=self.secondaries,
minmem=self.be_full[constants.BE_MINMEM],
maxmem=self.be_full[constants.BE_MAXMEM],
vcpus=self.be_full[constants.BE_VCPUS],
- nics=_NICListToTuple(self, self.nics),
+ nics=NICListToTuple(self, self.nics),
disk_template=self.op.disk_template,
disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
d[constants.IDISK_MODE]) for d in self.disks],
raise errors.OpPrereqError("No export found for relative path %s" %
src_path, errors.ECODE_INVAL)
- _CheckNodeOnline(self, src_node)
+ CheckNodeOnline(self, src_node)
result = self.rpc.call_export_info(src_node, src_path)
result.Raise("No export or invalid export found in dir %s" % src_path)
hv_type.CheckParameterSyntax(filled_hvp)
self.hv_full = filled_hvp
# check that we don't specify global parameters on an instance
- _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
- "instance", "cluster")
+ CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+ "instance", "cluster")
# fill and remember the beparams dict
self.be_full = _ComputeFullBeParams(self.op, cluster)
# disk checks/pre-build
default_vg = self.cfg.GetVGName()
- self.disks = _ComputeDisks(self.op, default_vg)
+ self.disks = ComputeDisks(self.op, default_vg)
if self.op.mode == constants.INSTANCE_IMPORT:
disk_images = []
# Release all unneeded node locks
keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
- _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
- _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
- _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+ ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
+ ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
+ ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES)), \
if self.op.snode == pnode.name:
raise errors.OpPrereqError("The secondary node cannot be the"
" primary node", errors.ECODE_INVAL)
- _CheckNodeOnline(self, self.op.snode)
- _CheckNodeNotDrained(self, self.op.snode)
- _CheckNodeVmCapable(self, self.op.snode)
+ CheckNodeOnline(self, self.op.snode)
+ CheckNodeNotDrained(self, self.op.snode)
+ CheckNodeVmCapable(self, self.op.snode)
self.secondaries.append(self.op.snode)
snode = self.cfg.GetNodeInfo(self.op.snode)
nodes = [pnode]
if self.op.disk_template in constants.DTS_INT_MIRROR:
nodes.append(snode)
- has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+ has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
if compat.any(map(has_es, nodes)):
raise errors.OpPrereqError("Disk template %s not supported with"
" exclusive storage" % self.op.disk_template,
# CheckRADOSFreeSpace() is just a placeholder.
# Any function that checks prerequisites can be placed here.
# Check if there is enough space on the RADOS cluster.
- _CheckRADOSFreeSpace()
+ CheckRADOSFreeSpace()
elif self.op.disk_template == constants.DT_EXT:
# FIXME: Function that checks prereqs if needed
pass
else:
# Check lv size requirements, if not adopting
- req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
- _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
+ req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
+ CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
(pnode.group, group_info.name, utils.CommaJoin(res)))
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
- _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
+ CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
- _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
+ CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
# check OS parameters (remotely)
- _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
+ CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
- _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
+ CheckNicsBridgesExist(self, self.nics, self.pnode.name)
#TODO: _CheckExtParams (remotely)
# Check parameters for extstorage
# memory check on primary node
#TODO(dynmem): use MINMEM for checking
if self.op.start:
- _CheckNodeFreeMemory(self, self.pnode.name,
- "creating instance %s" % self.op.instance_name,
- self.be_full[constants.BE_MAXMEM],
- self.op.hypervisor)
+ CheckNodeFreeMemory(self, self.pnode.name,
+ "creating instance %s" % self.op.instance_name,
+ self.be_full[constants.BE_MAXMEM],
+ self.op.hypervisor)
self.dry_run_result = list(nodenames)
# has no disks yet (we are generating them right here).
node = self.cfg.GetNodeInfo(pnode_name)
nodegroup = self.cfg.GetNodeGroup(node.group)
- disks = _GenerateDiskTemplate(self,
- self.op.disk_template,
- instance, pnode_name,
- self.secondaries,
- self.disks,
- self.instance_file_storage_dir,
- self.op.file_driver,
- 0,
- feedback_fn,
- self.cfg.GetGroupDiskParams(nodegroup))
+ disks = GenerateDiskTemplate(self,
+ self.op.disk_template,
+ instance, pnode_name,
+ self.secondaries,
+ self.disks,
+ self.instance_file_storage_dir,
+ self.op.file_driver,
+ 0,
+ feedback_fn,
+ self.cfg.GetGroupDiskParams(nodegroup))
iobj = objects.Instance(name=instance, os=self.op.os_type,
primary_node=pnode_name,
else:
feedback_fn("* creating instance disks...")
try:
- _CreateDisks(self, iobj)
+ CreateDisks(self, iobj)
except errors.OpExecError:
self.LogWarning("Device creation failed")
self.cfg.ReleaseDRBDMinors(instance)
if self.op.mode == constants.INSTANCE_IMPORT:
# Release unused nodes
- _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
+ ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
else:
# Release all nodes
- _ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE)
disk_abort = False
if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
feedback_fn("* wiping instance disks...")
try:
- _WipeDisks(self, iobj)
+ WipeDisks(self, iobj)
except errors.OpExecError, err:
logging.exception("Wiping disks failed")
self.LogWarning("Wiping instance disks failed (%s)", err)
# Something is already wrong with the disks, don't do anything else
pass
elif self.op.wait_for_sync:
- disk_abort = not _WaitForSync(self, iobj)
+ disk_abort = not WaitForSync(self, iobj)
elif iobj.disk_template in constants.DTS_INT_MIRROR:
# make sure the disks are not degraded (still sync-ing is ok)
feedback_fn("* checking mirrors status")
- disk_abort = not _WaitForSync(self, iobj, oneshot=True)
+ disk_abort = not WaitForSync(self, iobj, oneshot=True)
else:
disk_abort = False
if disk_abort:
- _RemoveDisks(self, iobj)
+ RemoveDisks(self, iobj)
self.cfg.RemoveInstance(iobj.name)
# Make sure the instance lock gets removed
self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
" this instance")
# Release all node resource locks
- _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+ ReleaseLocks(self, locking.LEVEL_NODE_RES)
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
# we need to set the disks ID to the primary node, since the
This runs on master, primary and secondary nodes of the instance.
"""
- env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env = BuildInstanceHookEnvByObject(self, self.instance)
env["INSTANCE_NEW_NAME"] = self.op.new_name
return env
This checks that the instance is in the cluster and is not running.
"""
- self.op.instance_name = _ExpandInstanceName(self.cfg,
- self.op.instance_name)
+ self.op.instance_name = ExpandInstanceName(self.cfg,
+ self.op.instance_name)
instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert instance is not None
- _CheckNodeOnline(self, instance.primary_node)
- _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
- msg="cannot rename")
+ CheckNodeOnline(self, instance.primary_node)
+ CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+ msg="cannot rename")
self.instance = instance
new_name = self.op.new_name
(inst.primary_node, old_file_storage_dir,
new_file_storage_dir))
- _StartInstanceDisks(self, inst, None)
+ StartInstanceDisks(self, inst, None)
# update info on disks
- info = _GetInstanceInfoText(inst)
+ info = GetInstanceInfoText(inst)
for (idx, disk) in enumerate(inst.disks):
for node in inst.all_nodes:
self.cfg.SetDiskID(disk, node)
(inst.name, inst.primary_node, msg))
self.LogWarning(msg)
finally:
- _ShutdownInstanceDisks(self, inst)
+ ShutdownInstanceDisks(self, inst)
return inst.name
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
- env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env = BuildInstanceHookEnvByObject(self, self.instance)
env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
return env
self.owned_locks(locking.LEVEL_NODE)), \
"Not owning correct locks"
- _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
+ RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
class LUInstanceMove(LogicalUnit):
def ExpandNames(self):
self._ExpandAndLockInstance()
- target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+ target_node = ExpandNodeName(self.cfg, self.op.target_node)
self.op.target_node = target_node
self.needed_locks[locking.LEVEL_NODE] = [target_node]
self.needed_locks[locking.LEVEL_NODE_RES] = []
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
"TARGET_NODE": self.op.target_node,
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
raise errors.OpPrereqError("Instance disk %d has a complex layout,"
" cannot copy" % idx, errors.ECODE_STATE)
- _CheckNodeOnline(self, target_node)
- _CheckNodeNotDrained(self, target_node)
- _CheckNodeVmCapable(self, target_node)
+ CheckNodeOnline(self, target_node)
+ CheckNodeNotDrained(self, target_node)
+ CheckNodeVmCapable(self, target_node)
cluster = self.cfg.GetClusterInfo()
group_info = self.cfg.GetNodeGroup(node.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
- _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
- ignore=self.op.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
+ ignore=self.op.ignore_ipolicy)
if instance.admin_state == constants.ADMINST_UP:
# check memory requirements on the secondary node
- _CheckNodeFreeMemory(self, target_node,
- "failing over instance %s" %
- instance.name, bep[constants.BE_MAXMEM],
- instance.hypervisor)
+ CheckNodeFreeMemory(self, target_node,
+ "failing over instance %s" %
+ instance.name, bep[constants.BE_MAXMEM],
+ instance.hypervisor)
else:
self.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
# check bridge existance
- _CheckInstanceBridgesExist(self, instance, node=target_node)
+ CheckInstanceBridgesExist(self, instance, node=target_node)
def Exec(self, feedback_fn):
"""Move an instance.
# create the target disks
try:
- _CreateDisks(self, instance, target_node=target_node)
+ CreateDisks(self, instance, target_node=target_node)
except errors.OpExecError:
self.LogWarning("Device creation failed")
self.cfg.ReleaseDRBDMinors(instance.name)
if errs:
self.LogWarning("Some disks failed to copy, aborting")
try:
- _RemoveDisks(self, instance, target_node=target_node)
+ RemoveDisks(self, instance, target_node=target_node)
finally:
self.cfg.ReleaseDRBDMinors(instance.name)
raise errors.OpExecError("Errors during disk copy: %s" %
self.cfg.Update(instance, feedback_fn)
self.LogInfo("Removing the disks on the original node")
- _RemoveDisks(self, instance, target_node=source_node)
+ RemoveDisks(self, instance, target_node=source_node)
# Only start the instance if it's marked as up
if instance.admin_state == constants.ADMINST_UP:
self.LogInfo("Starting instance %s on node %s",
instance.name, target_node)
- disks_ok, _ = _AssembleInstanceDisks(self, instance,
- ignore_secondaries=True)
+ disks_ok, _ = AssembleInstanceDisks(self, instance,
+ ignore_secondaries=True)
if not disks_ok:
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Can't activate the instance's disks")
result = self.rpc.call_instance_start(target_node,
self.op.reason)
msg = result.fail_msg
if msg:
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Could not start instance %s on node %s: %s" %
(instance.name, target_node, msg))
"""Calculate the locks.
"""
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
# iallocator will select nodes and even if no iallocator is used,
# collisions with LUInstanceCreate should be avoided
else:
nodeslist = []
for inst in self.op.instances:
- inst.pnode = _ExpandNodeName(self.cfg, inst.pnode)
+ inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
nodeslist.append(inst.pnode)
if inst.snode is not None:
- inst.snode = _ExpandNodeName(self.cfg, inst.snode)
+ inst.snode = ExpandNodeName(self.cfg, inst.snode)
nodeslist.append(inst.snode)
self.needed_locks[locking.LEVEL_NODE] = nodeslist
else:
node_whitelist = None
- insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
+ insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
_ComputeNics(op, cluster, None,
self.cfg, ec_id),
_ComputeFullBeParams(op, cluster),
self.filled = None
-def PrepareContainerMods(mods, private_fn):
+def _PrepareContainerMods(mods, private_fn):
"""Prepares a list of container modifications by adding a private data field.
@type mods: list of tuples; (operation, index, parameters)
(kind, identifier), errors.ECODE_NOENT)
-def ApplyContainerMods(kind, container, chgdesc, mods,
- create_fn, modify_fn, remove_fn):
+def _ApplyContainerMods(kind, container, chgdesc, mods,
+ create_fn, modify_fn, remove_fn):
"""Applies descriptions in C{mods} to C{container}.
@type kind: string
@type chgdesc: None or list
@param chgdesc: List of applied changes
@type mods: list
- @param mods: Modifications as returned by L{PrepareContainerMods}
+ @param mods: Modifications as returned by L{_PrepareContainerMods}
@type create_fn: callable
@param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
receives absolute item index, parameters and private data object as added
- by L{PrepareContainerMods}, returns tuple containing new item and changes
+ by L{_PrepareContainerMods}, returns tuple containing new item and changes
as list
@type modify_fn: callable
@param modify_fn: Callback for modifying an existing item
(L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
- and private data object as added by L{PrepareContainerMods}, returns
+ and private data object as added by L{_PrepareContainerMods}, returns
changes as list
@type remove_fn: callable
@param remove_fn: Callback on removing item; receives absolute item index,
- item and private data object as added by L{PrepareContainerMods}
+ item and private data object as added by L{_PrepareContainerMods}
"""
for (op, identifier, params, private) in mods:
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
- _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
- "hypervisor", "instance", "cluster")
+ CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+ "hypervisor", "instance", "cluster")
self.op.disks = self._UpgradeDiskNicMods(
"disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
self._VerifyNicModification)
if self.op.pnode:
- self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+ self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
def ExpandNames(self):
self._ExpandAndLockInstance()
elif level == locking.LEVEL_NODE:
self._LockInstancesNodes()
if self.op.disk_template and self.op.remote_node:
- self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+ self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
n = copy.deepcopy(nic)
nicparams = self.cluster.SimpleFillNIC(n.nicparams)
n.nicparams = nicparams
- nics.append(_NICToTuple(self, n))
+ nics.append(NICToTuple(self, n))
args["nics"] = nics
- env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
+ env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
if self.op.disk_template:
env["NEW_DISK_TEMPLATE"] = self.op.disk_template
if self.op.runtime_mem:
new_net_obj.name, errors.ECODE_INVAL)
new_params = dict(netparams)
else:
- new_params = _GetUpdatedParams(old_params, update_params_dict)
+ new_params = GetUpdatedParams(old_params, update_params_dict)
utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
" %s to %s" % (instance.disk_template,
self.op.disk_template),
errors.ECODE_INVAL)
- _CheckInstanceState(self, instance, INSTANCE_DOWN,
- msg="cannot change disk template")
+ CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot change disk template")
if self.op.disk_template in constants.DTS_INT_MIRROR:
if self.op.remote_node == pnode:
raise errors.OpPrereqError("Given new secondary node %s is the same"
" as the primary node of the instance" %
self.op.remote_node, errors.ECODE_STATE)
- _CheckNodeOnline(self, self.op.remote_node)
- _CheckNodeNotDrained(self, self.op.remote_node)
+ CheckNodeOnline(self, self.op.remote_node)
+ CheckNodeNotDrained(self, self.op.remote_node)
# FIXME: here we assume that the old instance type is DT_PLAIN
assert instance.disk_template == constants.DT_PLAIN
disks = [{constants.IDISK_SIZE: d.size,
constants.IDISK_VG: d.logical_id[0]}
for d in instance.disks]
- required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
- _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+ required = ComputeDiskSizePerVG(self.op.disk_template, disks)
+ CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
snode_group = self.cfg.GetNodeGroup(snode_info.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
snode_group)
- _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
- ignore=self.op.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
+ ignore=self.op.ignore_ipolicy)
if pnode_info.group != snode_info.group:
self.LogWarning("The primary and secondary nodes are in two"
" different node groups; the disk parameters"
if self.op.disk_template in constants.DTS_INT_MIRROR:
assert snode_info
nodes.append(snode_info)
- has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+ has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
if compat.any(map(has_es, nodes)):
errmsg = ("Cannot convert disk template from %s to %s when exclusive"
" storage is enabled" % (instance.disk_template,
self._VerifyDiskModification)
# Prepare disk/NIC modifications
- self.diskmod = PrepareContainerMods(self.op.disks, None)
- self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+ self.diskmod = _PrepareContainerMods(self.op.disks, None)
+ self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
# Check the validity of the `provider' parameter
if instance.disk_template in constants.DT_EXT:
# OS change
if self.op.os_name and not self.op.force:
- _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
- self.op.force_variant)
+ CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
+ self.op.force_variant)
instance_os = self.op.os_name
else:
instance_os = instance.os
# hvparams processing
if self.op.hvparams:
hv_type = instance.hypervisor
- i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
+ i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
# local check
hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
- _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
+ CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
self.hv_proposed = self.hv_new = hv_new # the new actual values
self.hv_inst = i_hvdict # the new dict (without defaults)
else:
# beparams processing
if self.op.beparams:
- i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
- use_none=True)
+ i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
+ use_none=True)
objects.UpgradeBeParams(i_bedict)
utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
be_new = cluster.SimpleFillBE(i_bedict)
# osparams processing
if self.op.osparams:
- i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
- _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+ i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
+ CheckOSParams(self, True, nodelist, instance_os, i_osdict)
self.os_inst = i_osdict # the new dict (without defaults)
else:
self.os_inst = {}
delta = self.op.runtime_mem - current_memory
if delta > 0:
- _CheckNodeFreeMemory(self, instance.primary_node,
- "ballooning memory for instance %s" %
- instance.name, delta, instance.hypervisor)
+ CheckNodeFreeMemory(self, instance.primary_node,
+ "ballooning memory for instance %s" %
+ instance.name, delta, instance.hypervisor)
if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Disk operations not supported for"
# Verify NIC changes (operating on copy)
nics = instance.nics[:]
- ApplyContainerMods("NIC", nics, None, self.nicmod,
- _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
+ _ApplyContainerMods("NIC", nics, None, self.nicmod,
+ _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
if len(nics) > constants.MAX_NICS:
raise errors.OpPrereqError("Instance has too many network interfaces"
" (%d), cannot add more" % constants.MAX_NICS,
# Verify disk changes (operating on a copy)
disks = copy.deepcopy(instance.disks)
- ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
- None)
+ _ApplyContainerMods("disk", disks, None, self.diskmod, None,
+ _PrepareDiskMod, None)
utils.ValidateDeviceNames("disk", disks)
if len(disks) > constants.MAX_DISKS:
raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
if self.op.offline is not None and self.op.offline:
- _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
- msg="can't change to offline")
+ CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+ msg="can't change to offline")
# Pre-compute NIC changes (necessary to use result in hooks)
self._nic_chgdesc = []
if self.nicmod:
# Operate on copies as this is still in prereq
nics = [nic.Copy() for nic in instance.nics]
- ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
- self._CreateNewNic, self._ApplyNicMods, None)
+ _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
+ self._CreateNewNic, self._ApplyNicMods, None)
# Verify that NIC names are unique and valid
utils.ValidateDeviceNames("NIC", nics)
self._new_nics = nics
constants.IDISK_VG: d.logical_id[0],
constants.IDISK_NAME: d.name}
for d in instance.disks]
- new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
- instance.name, pnode, [snode],
- disk_info, None, None, 0, feedback_fn,
- self.diskparams)
+ new_disks = GenerateDiskTemplate(self, self.op.disk_template,
+ instance.name, pnode, [snode],
+ disk_info, None, None, 0, feedback_fn,
+ self.diskparams)
anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
self.diskparams)
- p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
- s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
- info = _GetInstanceInfoText(instance)
+ p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
+ s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
+ info = GetInstanceInfoText(instance)
feedback_fn("Creating additional volumes...")
# first, create the missing data and meta devices
for disk in anno_disks:
# unfortunately this is... not too nice
- _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
- info, True, p_excl_stor)
+ CreateSingleBlockDev(self, pnode, instance, disk.children[1],
+ info, True, p_excl_stor)
for child in disk.children:
- _CreateSingleBlockDev(self, snode, instance, child, info, True,
- s_excl_stor)
+ CreateSingleBlockDev(self, snode, instance, child, info, True,
+ s_excl_stor)
# at this stage, all new LVs have been created, we can rename the
# old ones
feedback_fn("Renaming original volumes...")
for disk in anno_disks:
for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
f_create = node == pnode
- _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
- excl_stor)
+ CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+ excl_stor)
except errors.GenericError, e:
feedback_fn("Initializing of DRBD devices failed;"
" renaming back original volumes...")
self.cfg.Update(instance, feedback_fn)
# Release node locks while waiting for sync
- _ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE)
# disks are created, waiting for sync
- disk_abort = not _WaitForSync(self, instance,
- oneshot=not self.op.wait_for_sync)
+ disk_abort = not WaitForSync(self, instance,
+ oneshot=not self.op.wait_for_sync)
if disk_abort:
raise errors.OpExecError("There are some degraded disks for"
" this instance, please cleanup manually")
snode = instance.secondary_nodes[0]
feedback_fn("Converting template to plain")
- old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
+ old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
new_disks = [d.children[0] for d in instance.disks]
# copy over size, mode and name
self.cfg.Update(instance, feedback_fn)
# Release locks in case removing disks takes a while
- _ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE)
feedback_fn("Removing volumes on the secondary node...")
for disk in old_disks:
file_driver = file_path = None
disk = \
- _GenerateDiskTemplate(self, instance.disk_template, instance.name,
- instance.primary_node, instance.secondary_nodes,
- [params], file_path, file_driver, idx,
- self.Log, self.diskparams)[0]
+ GenerateDiskTemplate(self, instance.disk_template, instance.name,
+ instance.primary_node, instance.secondary_nodes,
+ [params], file_path, file_driver, idx,
+ self.Log, self.diskparams)[0]
- info = _GetInstanceInfoText(instance)
+ info = GetInstanceInfoText(instance)
logging.info("Creating volume %s for instance %s",
disk.iv_name, instance.name)
for node in instance.all_nodes:
f_create = (node == instance.primary_node)
try:
- _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
+ CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
except errors.OpExecError, err:
self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
disk.iv_name, disk, node, err)
if self.cluster.prealloc_wipe_disks:
# Wipe new disk
- _WipeDisks(self, instance,
- disks=[(idx, disk, 0)])
+ WipeDisks(self, instance,
+ disks=[(idx, disk, 0)])
return (disk, [
("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
"""Removes a disk.
"""
- (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
+ (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
self.cfg.SetDiskID(disk, node)
msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
result.append(("runtime_memory", self.op.runtime_mem))
# Apply disk changes
- ApplyContainerMods("disk", instance.disks, result, self.diskmod,
- self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
+ _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
+ self._CreateNewDisk, self._ModifyDisk,
+ self._RemoveDisk)
_UpdateIvNames(0, instance.disks)
if self.op.disk_template:
("Not owning the correct locks, owning %r, expected at least %r" %
(owned, check_nodes))
- r_shut = _ShutdownInstanceDisks(self, instance)
+ r_shut = ShutdownInstanceDisks(self, instance)
if not r_shut:
raise errors.OpExecError("Cannot shutdown instance disks, unable to"
" proceed with disk template conversion")
# Release node and resource locks if there are any (they might already have
# been released during disk conversion)
- _ReleaseLocks(self, locking.LEVEL_NODE)
- _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+ ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE_RES)
# Apply NIC changes
if self._new_nics is not None:
REQ_BGL = False
def ExpandNames(self):
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_NODEGROUP: [],
else:
self.req_target_uuids = None
- self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+ self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
("Instance %s's nodes changed while we kept the lock" %
self.op.instance_name)
- inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
- owned_groups)
+ inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+ owned_groups)
if self.req_target_uuids:
# User requested specific target groups
"TARGET_GROUPS": " ".join(self.target_uuids),
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
(self.op.instance_name, self.op.iallocator,
ial.info), errors.ECODE_NORES)
- jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+ jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
self.LogInfo("Iallocator returned %s job(s) for changing group of"
" instance '%s'", len(jobs), self.op.instance_name)
from ganeti.masterd import iallocator
from ganeti import utils
from ganeti.cmdlib.base import LogicalUnit, Tasklet
-from ganeti.cmdlib.common import _ExpandInstanceName, \
- _CheckIAllocatorOrNode, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _CheckDiskConsistency, \
- _ExpandCheckDisks, _ShutdownInstanceDisks, _AssembleInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
- _CheckTargetNodeIPolicy, _ReleaseLocks, _CheckNodeNotDrained, \
- _CopyLockList, _CheckNodeFreeMemory, _CheckInstanceBridgesExist
+from ganeti.cmdlib.common import ExpandInstanceName, \
+ CheckIAllocatorOrNode, ExpandNodeName
+from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
+ ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+ CheckTargetNodeIPolicy, ReleaseLocks, CheckNodeNotDrained, \
+ CopyLockList, CheckNodeFreeMemory, CheckInstanceBridgesExist
import ganeti.masterd.instance
"""
if lu.op.target_node is not None:
- lu.op.target_node = _ExpandNodeName(lu.cfg, lu.op.target_node)
+ lu.op.target_node = ExpandNodeName(lu.cfg, lu.op.target_node)
lu.needed_locks[locking.LEVEL_NODE] = []
lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
lu.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
class LUInstanceFailover(LogicalUnit):
else:
env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
- env.update(_BuildInstanceHookEnvByObject(self, instance))
+ env.update(BuildInstanceHookEnvByObject(self, instance))
return env
instance = self._migrater.instance
source_node = instance.primary_node
target_node = self.op.target_node
- env = _BuildInstanceHookEnvByObject(self, instance)
+ env = BuildInstanceHookEnvByObject(self, instance)
env.update({
"MIGRATE_LIVE": self._migrater.live,
"MIGRATE_CLEANUP": self.op.cleanup,
This checks that the instance is in the cluster.
"""
- instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
+ instance_name = ExpandInstanceName(self.lu.cfg, self.instance_name)
instance = self.cfg.GetInstanceInfo(instance_name)
assert instance is not None
self.instance = instance
errors.ECODE_STATE)
if instance.disk_template in constants.DTS_EXT_MIRROR:
- _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
+ CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
if self.lu.op.iallocator:
assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
group_info = self.cfg.GetNodeGroup(nodeinfo.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
group_info)
- _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
- ignore=self.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
+ ignore=self.ignore_ipolicy)
# self.target_node is already populated, either directly or by the
# iallocator run
if len(self.lu.tasklets) == 1:
# It is safe to release locks only when we're the only tasklet
# in the LU
- _ReleaseLocks(self.lu, locking.LEVEL_NODE,
- keep=[instance.primary_node, self.target_node])
- _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE,
+ keep=[instance.primary_node, self.target_node])
+ ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
else:
assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
group_info = self.cfg.GetNodeGroup(nodeinfo.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
group_info)
- _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
- ignore=self.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
+ ignore=self.ignore_ipolicy)
i_be = cluster.FillBE(instance)
# check memory requirements on the secondary node
if (not self.cleanup and
(not self.failover or instance.admin_state == constants.ADMINST_UP)):
- self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
- "migrating instance %s" %
- instance.name,
- i_be[constants.BE_MINMEM],
- instance.hypervisor)
+ self.tgt_free_mem = CheckNodeFreeMemory(self.lu, target_node,
+ "migrating instance %s" %
+ instance.name,
+ i_be[constants.BE_MINMEM],
+ instance.hypervisor)
else:
self.lu.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
self.failover = True
# check bridge existance
- _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
+ CheckInstanceBridgesExist(self.lu, instance, node=target_node)
if not self.cleanup:
- _CheckNodeNotDrained(self.lu, target_node)
+ CheckNodeNotDrained(self.lu, target_node)
if not self.failover:
result = self.rpc.call_instance_migratable(instance.primary_node,
instance)
self.feedback_fn("* checking disk consistency between source and target")
for (idx, dev) in enumerate(instance.disks):
- if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
+ if not CheckDiskConsistency(self.lu, instance, dev, target_node, False):
raise errors.OpExecError("Disk %s is degraded or not fully"
" synchronized on target node,"
" aborting migration" % idx)
# If the instance's disk template is `rbd' or `ext' and there was a
# successful migration, unmap the device from the source node.
if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
- disks = _ExpandCheckDisks(instance, instance.disks)
+ disks = ExpandCheckDisks(instance, instance.disks)
self.feedback_fn("* unmapping instance's disks from %s" % source_node)
for disk in disks:
result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
self.feedback_fn("* checking disk consistency between source and target")
for (idx, dev) in enumerate(instance.disks):
# for drbd, these are drbd over lvm
- if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
- False):
+ if not CheckDiskConsistency(self.lu, instance, dev, target_node,
+ False):
if primary_node.offline:
self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
" target node %s" %
(instance.name, source_node, msg))
self.feedback_fn("* deactivating the instance's disks on source node")
- if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
+ if not ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
raise errors.OpExecError("Can't shut down the instance's disks")
instance.primary_node = target_node
logging.info("Starting instance %s on node %s",
instance.name, target_node)
- disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
- ignore_secondaries=True)
+ disks_ok, _ = AssembleInstanceDisks(self.lu, instance,
+ ignore_secondaries=True)
if not disks_ok:
- _ShutdownInstanceDisks(self.lu, instance)
+ ShutdownInstanceDisks(self.lu, instance)
raise errors.OpExecError("Can't activate the instance's disks")
self.feedback_fn("* starting the instance on the target node %s" %
False, self.lu.op.reason)
msg = result.fail_msg
if msg:
- _ShutdownInstanceDisks(self.lu, instance)
+ ShutdownInstanceDisks(self.lu, instance)
raise errors.OpExecError("Could not start instance %s on node %s: %s" %
(instance.name, target_node, msg))
from ganeti import utils
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU
from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
- _CheckHVParams, _CheckInstanceState, _CheckNodeOnline, _ExpandNodeName, \
- _GetUpdatedParams, _CheckOSParams, _ShareAll
-from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \
- _ShutdownInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
- _CheckInstanceBridgesExist, _CheckNodeFreeMemory, _CheckNodeHasOS
+ CheckHVParams, CheckInstanceState, CheckNodeOnline, ExpandNodeName, \
+ GetUpdatedParams, CheckOSParams, ShareAll
+from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
+ ShutdownInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+ CheckInstanceBridgesExist, CheckNodeFreeMemory, CheckNodeHasOS
class LUInstanceStartup(LogicalUnit):
"FORCE": self.op.force,
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
filled_hvp.update(self.op.hvparams)
hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
- _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+ CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
- _CheckInstanceState(self, instance, INSTANCE_ONLINE)
+ CheckInstanceState(self, instance, INSTANCE_ONLINE)
self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
if self.op.hvparams or self.op.beparams:
self.LogWarning("Overridden parameters are ignored")
else:
- _CheckNodeOnline(self, instance.primary_node)
+ CheckNodeOnline(self, instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(instance)
bep.update(self.op.beparams)
# check bridges existence
- _CheckInstanceBridgesExist(self, instance)
+ CheckInstanceBridgesExist(self, instance)
remote_info = self.rpc.call_instance_info(instance.primary_node,
instance.name,
remote_info.Raise("Error checking node %s" % instance.primary_node,
prereq=True, ecode=errors.ECODE_ENVIRON)
if not remote_info.payload: # not running already
- _CheckNodeFreeMemory(self, instance.primary_node,
- "starting instance %s" % instance.name,
- bep[constants.BE_MINMEM], instance.hypervisor)
+ CheckNodeFreeMemory(self, instance.primary_node,
+ "starting instance %s" % instance.name,
+ bep[constants.BE_MINMEM], instance.hypervisor)
def Exec(self, feedback_fn):
"""Start the instance.
else:
node_current = instance.primary_node
- _StartInstanceDisks(self, instance, force)
+ StartInstanceDisks(self, instance, force)
result = \
self.rpc.call_instance_start(node_current,
self.op.startup_paused, reason)
msg = result.fail_msg
if msg:
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Could not start instance: %s" % msg)
This runs on master, primary and secondary nodes of the instance.
"""
- env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env = BuildInstanceHookEnvByObject(self, self.instance)
env["TIMEOUT"] = self.op.timeout
return env
"Cannot retrieve locked instance %s" % self.op.instance_name
if not self.op.force:
- _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+ CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
else:
self.LogWarning("Ignoring offline instance check")
if self.primary_offline and self.op.ignore_offline_nodes:
self.LogWarning("Ignoring offline primary node")
else:
- _CheckNodeOnline(self, self.instance.primary_node)
+ CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Shutdown the instance.
if msg:
self.LogWarning("Could not shutdown instance: %s", msg)
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
class LUInstanceReinstall(LogicalUnit):
This runs on master, primary and secondary nodes of the instance.
"""
- return _BuildInstanceHookEnvByObject(self, self.instance)
+ return BuildInstanceHookEnvByObject(self, self.instance)
def BuildHooksNodes(self):
"""Build hooks nodes.
instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
- " offline, cannot reinstall")
+ CheckNodeOnline(self, instance.primary_node, "Instance primary node"
+ " offline, cannot reinstall")
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name,
errors.ECODE_INVAL)
- _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
+ CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
if self.op.os_type is not None:
# OS verification
- pnode = _ExpandNodeName(self.cfg, instance.primary_node)
- _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
+ pnode = ExpandNodeName(self.cfg, instance.primary_node)
+ CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
instance_os = self.op.os_type
else:
instance_os = instance.os
nodelist = list(instance.all_nodes)
if self.op.osparams:
- i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
- _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+ i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
+ CheckOSParams(self, True, nodelist, instance_os, i_osdict)
self.os_inst = i_osdict # the new dict (without defaults)
else:
self.os_inst = None
# Write to configuration
self.cfg.Update(inst, feedback_fn)
- _StartInstanceDisks(self, inst, None)
+ StartInstanceDisks(self, inst, None)
try:
feedback_fn("Running the instance OS create scripts...")
# FIXME: pass debug option from opcode to backend
result.Raise("Could not install OS for instance %s on node %s" %
(inst.name, inst.primary_node))
finally:
- _ShutdownInstanceDisks(self, inst)
+ ShutdownInstanceDisks(self, inst)
class LUInstanceReboot(LogicalUnit):
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckInstanceState(self, instance, INSTANCE_ONLINE)
- _CheckNodeOnline(self, instance.primary_node)
+ CheckInstanceState(self, instance, INSTANCE_ONLINE)
+ CheckNodeOnline(self, instance.primary_node)
# check bridges existence
- _CheckInstanceBridgesExist(self, instance)
+ CheckInstanceBridgesExist(self, instance)
def Exec(self, feedback_fn):
"""Reboot the instance.
self.op.shutdown_timeout,
reason)
result.Raise("Could not shutdown instance for full reboot")
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
else:
self.LogInfo("Instance %s was already stopped, starting now",
instance.name)
- _StartInstanceDisks(self, instance, ignore_secondaries)
+ StartInstanceDisks(self, instance, ignore_secondaries)
result = self.rpc.call_instance_start(node_current,
(instance, None, None), False,
reason)
msg = result.fail_msg
if msg:
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Could not start instance for"
" full reboot: %s" % msg)
self.cfg.MarkInstanceUp(instance.name)
-def _GetInstanceConsole(cluster, instance):
+def GetInstanceConsole(cluster, instance):
"""Returns console information for an instance.
@type cluster: L{objects.Cluster}
REQ_BGL = False
def ExpandNames(self):
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self._ExpandAndLockInstance()
def CheckPrereq(self):
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, self.instance.primary_node)
+ CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Connect to the console of an instance
logging.debug("Connecting to console of %s on %s", instance.name, node)
- return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
+ return GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
from ganeti import locking
from ganeti import qlang
from ganeti import query
-from ganeti.cmdlib.base import _QueryBase, NoHooksLU
-from ganeti.cmdlib.common import _ShareAll, _GetWantedInstances, \
- _CheckInstanceNodeGroups, _CheckInstancesNodeGroups, _AnnotateDiskParams
-from ganeti.cmdlib.instance_operation import _GetInstanceConsole
-from ganeti.cmdlib.instance_utils import _NICListToTuple
+from ganeti.cmdlib.base import QueryBase, NoHooksLU
+from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
+ CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
+from ganeti.cmdlib.instance_operation import GetInstanceConsole
+from ganeti.cmdlib.instance_utils import NICListToTuple
import ganeti.masterd.instance
-class _InstanceQuery(_QueryBase):
+class InstanceQuery(QueryBase):
FIELDS = query.INSTANCE_FIELDS
def ExpandNames(self, lu):
lu.needed_locks = {}
- lu.share_locks = _ShareAll()
+ lu.share_locks = ShareAll()
if self.names:
- self.wanted = _GetWantedInstances(lu, self.names)
+ self.wanted = GetWantedInstances(lu, self.names)
else:
self.wanted = locking.ALL_SET
# Check if node groups for locked instances are still correct
for instance_name in owned_instances:
- _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
+ CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
def _GetQueryData(self, lu):
"""Computes the list of instances and their attributes.
for inst in instance_list:
if inst.name in live_data:
# Instance is running
- consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
+ consinfo[inst.name] = GetInstanceConsole(cluster, inst)
else:
consinfo[inst.name] = None
assert set(consinfo.keys()) == set(instance_names)
REQ_BGL = False
def CheckArguments(self):
- self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
self.op.output_fields, self.op.use_locking)
def ExpandNames(self):
if self.op.instances or not self.op.use_locking:
# Expand instance names right here
- self.wanted_names = _GetWantedInstances(self, self.op.instances)
+ self.wanted_names = GetWantedInstances(self, self.op.instances)
else:
# Will use acquired locks
self.wanted_names = None
if self.op.use_locking:
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
if self.wanted_names is None:
self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
if self.op.use_locking:
- _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
- None)
+ CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
+ None)
else:
assert not (owned_instances or owned_groups or
owned_nodes or owned_networks)
"""Compute block device status.
"""
- (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
+ (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
return self._ComputeDiskStatusInner(instance, snode, anno_dev)
"snodes_group_names": map(group2name_fn, snodes_group_uuids),
"os": instance.os,
# this happens to be the same format used for hooks
- "nics": _NICListToTuple(self, instance.nics),
+ "nics": NICListToTuple(self, instance.nics),
"disk_template": instance.disk_template,
"disks": disks,
"hypervisor": instance.hypervisor,
from ganeti import rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
- _AnnotateDiskParams, _CheckIAllocatorOrNode, _ExpandNodeName, \
- _CheckNodeOnline, _CheckInstanceNodeGroups, _CheckInstanceState, \
- _IsExclusiveStorageEnabledNode, _FindFaultyInstanceDisks
-from ganeti.cmdlib.instance_utils import _GetInstanceInfoText, \
- _CopyLockList, _ReleaseLocks, _CheckNodeVmCapable, \
- _BuildInstanceHookEnvByObject, _CheckNodeNotDrained, _CheckTargetNodeIPolicy
+ AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeName, \
+ CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
+ IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks
+from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
+ CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
+ BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
import ganeti.masterd.instance
}
-def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
- excl_stor):
+def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+ excl_stor):
"""Create a single block device on a given node.
This will not recurse over children of the device, so they must be
if not force_create:
return created_devices
- _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
- excl_stor)
+ CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+ excl_stor)
# The device has been completely created, so there is no point in keeping
# its subdevices in the list. We just add the device itself instead.
created_devices = [(node, device)]
raise errors.DeviceCreationError(str(e), created_devices)
-def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
+def IsExclusiveStorageEnabledNodeName(cfg, nodename):
"""Whether exclusive_storage is in effect for the given node.
@type cfg: L{config.ConfigWriter}
if ni is None:
raise errors.OpPrereqError("Invalid node name %s" % nodename,
errors.ECODE_NOENT)
- return _IsExclusiveStorageEnabledNode(cfg, ni)
+ return IsExclusiveStorageEnabledNode(cfg, ni)
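# Both spellings of the exclusive-storage check appear as call sites in this
# patch; an illustrative contrast, with variable names as in the hunks above:
 # by node name, when only the name is at hand:
 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
 # by node object, when node objects have already been fetched:
 es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
                   nodes)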
-def _CreateBlockDev(lu, node, instance, device, force_create, info,
+def CreateBlockDev(lu, node, instance, device, force_create, info,
force_open):
"""Wrapper around L{_CreateBlockDevInner}.
This method annotates the root device first.
"""
- (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
- excl_stor = _IsExclusiveStorageEnabledNodeName(lu.cfg, node)
+ (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
+ excl_stor = IsExclusiveStorageEnabledNodeName(lu.cfg, node)
return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
force_open, excl_stor)
-def _CreateDisks(lu, instance, to_skip=None, target_node=None):
+def CreateDisks(lu, instance, to_skip=None, target_node=None):
"""Create all disks for an instance.
This abstracts away some work from AddInstance.
@return: the success of the creation
"""
- info = _GetInstanceInfoText(instance)
+ info = GetInstanceInfoText(instance)
if target_node is None:
pnode = instance.primary_node
all_nodes = instance.all_nodes
for node in all_nodes:
f_create = node == pnode
try:
- _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+ CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
disks_created.append((node, device))
except errors.OpExecError:
logging.warning("Creating disk %s for instance '%s' failed",
raise errors.OpExecError(e.message)
-def _ComputeDiskSizePerVG(disk_template, disks):
+def ComputeDiskSizePerVG(disk_template, disks):
"""Compute disk size requirements in the volume group
"""
return req_size_dict[disk_template]
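# Judging from the call sites elsewhere in this patch, the per-VG requirement
# computed here is typically fed straight into the free-space check; a minimal
# sketch (variable names are illustrative only):
 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, new_disks)
 CheckNodesFreeDiskPerVG(self, node_names, req_sizes)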
-def _ComputeDisks(op, default_vg):
+def ComputeDisks(op, default_vg):
"""Computes the instance disks.
@param op: The instance opcode
return disks
-def _CheckRADOSFreeSpace():
+def CheckRADOSFreeSpace():
"""Compute disk size requirements inside the RADOS cluster.
"""
return drbd_dev
-def _GenerateDiskTemplate(
+def GenerateDiskTemplate(
lu, template_name, instance_name, primary_node, secondary_nodes,
disk_info, file_storage_dir, file_driver, base_index,
feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
# We don't want _CheckIAllocatorOrNode selecting the default iallocator
# when neither iallocator nor nodes are specified
if self.op.iallocator or self.op.nodes:
- _CheckIAllocatorOrNode(self, "iallocator", "nodes")
+ CheckIAllocatorOrNode(self, "iallocator", "nodes")
for (idx, params) in self.op.disks:
utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
if self.op.nodes:
- self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
+ self.op.nodes = [ExpandNodeName(self.cfg, n) for n in self.op.nodes]
self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
else:
self.needed_locks[locking.LEVEL_NODE] = []
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
- return _BuildInstanceHookEnvByObject(self, self.instance)
+ return BuildInstanceHookEnvByObject(self, self.instance)
def BuildHooksNodes(self):
"""Build hooks nodes.
else:
primary_node = instance.primary_node
if not self.op.iallocator:
- _CheckNodeOnline(self, primary_node)
+ CheckNodeOnline(self, primary_node)
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
if owned_groups:
# Node group locks are acquired only for the primary node (and only
# when the allocator is used)
- _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
- primary_only=True)
+ CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
+ primary_only=True)
# if we replace nodes *and* the old primary is offline, we don't
# check the instance state
old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
if not ((self.op.iallocator or self.op.nodes) and old_pnode.offline):
- _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
- msg="cannot recreate disks")
+ CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+ msg="cannot recreate disks")
if self.op.disks:
self.disks = dict(self.op.disks)
if self.op.iallocator:
self._RunAllocator()
# Release unneeded node and node resource locks
- _ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
- _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
- _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+ ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
+ ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
+ ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
# All touched nodes must be locked
mylocks = self.owned_locks(locking.LEVEL_NODE)
assert mylocks.issuperset(frozenset(instance.all_nodes))
- _CreateDisks(self, instance, to_skip=to_skip)
+ CreateDisks(self, instance, to_skip=to_skip)
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
errors.ECODE_NORES)
-def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
+def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
"""Checks if nodes have enough free disk space in all the VGs.
This function checks if all given nodes have the needed amount of
return (total_size - written) * avg_time
-def _WipeDisks(lu, instance, disks=None):
+def WipeDisks(lu, instance, disks=None):
"""Wipes instance disks.
@type lu: L{LogicalUnit}
" failed", idx, instance.name)
-def _ExpandCheckDisks(instance, disks):
+def ExpandCheckDisks(instance, disks):
"""Return the instance disks selected by the disks list
@type disks: list of L{objects.Disk} or None
return disks
-def _WaitForSync(lu, instance, disks=None, oneshot=False):
+def WaitForSync(lu, instance, disks=None, oneshot=False):
"""Sleep and poll for an instance's disk to sync.
"""
if not instance.disks or disks is not None and not disks:
return True
- disks = _ExpandCheckDisks(instance, disks)
+ disks = ExpandCheckDisks(instance, disks)
if not oneshot:
lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
return not cumul_degraded
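# Two calling patterns for the renamed helper appear later in this patch;
# shown side by side as a sketch:
 # poll all of the instance's disks, optionally only once:
 disk_abort = not WaitForSync(self, instance,
                              oneshot=not self.op.wait_for_sync)
 # or wait only for a single, freshly grown disk:
 disk_abort = not WaitForSync(self, instance, disks=[disk])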
-def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
+def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
"""Shutdown block devices of an instance.
This does the shutdown on all nodes of the instance.
"""
all_result = True
- disks = _ExpandCheckDisks(instance, disks)
+ disks = ExpandCheckDisks(instance, disks)
for disk in disks:
for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
_ShutdownInstanceDisks.
"""
- _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
- _ShutdownInstanceDisks(lu, instance, disks=disks)
+ CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
+ ShutdownInstanceDisks(lu, instance, disks=disks)
-def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
+def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
ignore_size=False):
"""Prepare the block devices for an instance.
device_info = []
disks_ok = True
iname = instance.name
- disks = _ExpandCheckDisks(instance, disks)
+ disks = ExpandCheckDisks(instance, disks)
# With the two passes mechanism we try to reduce the window of
# opportunity for the race condition of switching DRBD to primary
return disks_ok, device_info
-def _StartInstanceDisks(lu, instance, force):
+def StartInstanceDisks(lu, instance, force):
"""Start the disks of an instance.
"""
- disks_ok, _ = _AssembleInstanceDisks(lu, instance,
- ignore_secondaries=force)
+ disks_ok, _ = AssembleInstanceDisks(lu, instance,
+ ignore_secondaries=force)
if not disks_ok:
- _ShutdownInstanceDisks(lu, instance)
+ ShutdownInstanceDisks(lu, instance)
if force is not None and not force:
lu.LogWarning("",
hint=("If the message above refers to a secondary node,"
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
"AMOUNT": self.op.amount,
"ABSOLUTE": self.op.absolute,
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
"Cannot retrieve locked instance %s" % self.op.instance_name
nodenames = list(instance.all_nodes)
for node in nodenames:
- _CheckNodeOnline(self, node)
+ CheckNodeOnline(self, node)
self.instance = instance
# TODO: check the free disk space for file, when that feature will be
# supported
nodes = map(self.cfg.GetNodeInfo, nodenames)
- es_nodes = filter(lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n),
+ es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
nodes)
if es_nodes:
 # With exclusive storage we need to do something smarter than just looking
# at free space; for now, let's simply abort the operation.
raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
" is enabled", errors.ECODE_STATE)
- _CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
+ CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
def Exec(self, feedback_fn):
"""Execute disk grow.
wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
- disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
+ disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[disk])
if not disks_ok:
raise errors.OpExecError("Cannot activate block device to grow")
self.cfg.Update(instance, feedback_fn)
# Changes have been recorded, release node lock
- _ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE)
# Downgrade lock while waiting for sync
self.glm.downgrade(locking.LEVEL_INSTANCE)
assert instance.disks[self.op.disk] == disk
# Wipe newly added disk space
- _WipeDisks(self, instance,
- disks=[(self.op.disk, disk, old_disk_size)])
+ WipeDisks(self, instance,
+ disks=[(self.op.disk, disk, old_disk_size)])
if self.op.wait_for_sync:
- disk_abort = not _WaitForSync(self, instance, disks=[disk])
+ disk_abort = not WaitForSync(self, instance, disks=[disk])
if disk_abort:
self.LogWarning("Disk syncing has not returned a good status; check"
" the instance")
" iallocator script must be used or the"
" new node given", errors.ECODE_INVAL)
else:
- _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+ CheckIAllocatorOrNode(self, "iallocator", "remote_node")
elif remote_node is not None or ialloc is not None:
# Not replacing the secondary
"Conflicting options"
if self.op.remote_node is not None:
- self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+ self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
# Warning: do not remove the locking of the new secondary here
# unless DRBD8.AddChildren is changed to work in parallel;
"NEW_SECONDARY": self.op.remote_node,
"OLD_SECONDARY": instance.secondary_nodes[0],
}
- env.update(_BuildInstanceHookEnvByObject(self, instance))
+ env.update(BuildInstanceHookEnvByObject(self, instance))
return env
def BuildHooksNodes(self):
# Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
- _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
+ CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
return LogicalUnit.CheckPrereq(self)
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, self.instance.primary_node)
+ CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Activate the disks.
"""
disks_ok, disks_info = \
- _AssembleInstanceDisks(self, self.instance,
- ignore_size=self.op.ignore_size)
+ AssembleInstanceDisks(self, self.instance,
+ ignore_size=self.op.ignore_size)
if not disks_ok:
raise errors.OpExecError("Cannot activate block devices")
if self.op.wait_for_sync:
- if not _WaitForSync(self, self.instance):
+ if not WaitForSync(self, self.instance):
raise errors.OpExecError("Some disks of the instance are degraded!")
return disks_info
"""
instance = self.instance
if self.op.force:
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
else:
_SafeShutdownInstanceDisks(self, instance)
return result
-def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
+def CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
"""Wrapper around L{_CheckDiskConsistencyInner}.
"""
- (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
+ (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
ldisk=ldisk)
@returns The result of the rpc call
"""
- (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
+ (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return lu.rpc.call_blockdev_find(node, disk)
return remote_node_name
def _FindFaultyDisks(self, node_name):
- """Wrapper for L{_FindFaultyInstanceDisks}.
+ """Wrapper for L{FindFaultyInstanceDisks}.
"""
- return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
- node_name, True)
+ return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
+ node_name, True)
def _CheckDisksActivated(self, instance):
"""Checks if the instance disks are activated.
self.target_node = secondary_node
check_nodes = [self.new_node, self.other_node]
- _CheckNodeNotDrained(self.lu, remote_node)
- _CheckNodeVmCapable(self.lu, remote_node)
+ CheckNodeNotDrained(self.lu, remote_node)
+ CheckNodeVmCapable(self.lu, remote_node)
old_node_info = self.cfg.GetNodeInfo(secondary_node)
assert old_node_info is not None
cluster = self.cfg.GetClusterInfo()
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
new_group_info)
- _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
- self.cfg, ignore=self.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
+ self.cfg, ignore=self.ignore_ipolicy)
for node in check_nodes:
- _CheckNodeOnline(self.lu, node)
+ CheckNodeOnline(self.lu, node)
touched_nodes = frozenset(node_name for node_name in [self.new_node,
self.other_node,
if node_name is not None)
# Release unneeded node and node resource locks
- _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
- _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
- _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
# Release any owned node group
- _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
+ ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
# Check whether disks are valid
for disk_idx in self.disks:
# Activate the instance disks if we're replacing them on a down instance
if activate_disks:
- _StartInstanceDisks(self.lu, self.instance, True)
+ StartInstanceDisks(self.lu, self.instance, True)
try:
# Should we replace the secondary node?
self.lu.LogInfo("Checking disk/%d consistency on node %s" %
(idx, node_name))
- if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
- on_primary, ldisk=ldisk):
+ if not CheckDiskConsistency(self.lu, self.instance, dev, node_name,
+ on_primary, ldisk=ldisk):
raise errors.OpExecError("Node %s has degraded storage, unsafe to"
" replace disks for instance %s" %
(node_name, self.instance.name))
"""
iv_names = {}
- disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+ disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
for idx, dev in enumerate(disks):
if idx not in self.disks:
continue
new_lvs = [lv_data, lv_meta]
old_lvs = [child.Copy() for child in dev.children]
iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
- excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
+ excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
# we pass force_create=True to force the LVM creation
for new_lv in new_lvs:
_CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
- _GetInstanceInfoText(self.instance), False,
+ GetInstanceInfoText(self.instance), False,
excl_stor)
return iv_names
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node, iv_names)
# TODO: Check if releasing locks early still makes sense
- _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
else:
# Release all resource locks except those used by the instance
- _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
- keep=self.node_secondary_ip.keys())
+ ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+ keep=self.node_secondary_ip.keys())
# Release all node locks while waiting for sync
- _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE)
# TODO: Can the instance lock be downgraded here? Take the optional disk
# shutdown in the caller into consideration.
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
- _WaitForSync(self.lu, self.instance)
+ WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
- disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
- excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
+ disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+ excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
for idx, dev in enumerate(disks):
self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
(self.new_node, idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
_CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
- True, _GetInstanceInfoText(self.instance), False,
+ True, GetInstanceInfoText(self.instance), False,
excl_stor)
 # Step 4: drbd minors and drbd setup changes
children=dev.children,
size=dev.size,
params={})
- (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
- self.cfg)
+ (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
+ self.cfg)
try:
- _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
- anno_new_drbd,
- _GetInstanceInfoText(self.instance), False,
- excl_stor)
+ CreateSingleBlockDev(self.lu, self.new_node, self.instance,
+ anno_new_drbd,
+ GetInstanceInfoText(self.instance), False,
+ excl_stor)
except errors.GenericError:
self.cfg.ReleaseDRBDMinors(self.instance.name)
raise
self.cfg.Update(self.instance, feedback_fn)
# Release all node locks (the configuration has been updated)
- _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE)
# and now perform the drbd attach
self.lu.LogInfo("Attaching primary drbds to new secondary"
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node, iv_names)
# TODO: Check if releasing locks early still makes sense
- _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+ ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
else:
# Release all resource locks except those used by the instance
- _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
- keep=self.node_secondary_ip.keys())
+ ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+ keep=self.node_secondary_ip.keys())
# TODO: Can the instance lock be downgraded here? Take the optional disk
# shutdown in the caller into consideration.
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
- _WaitForSync(self.lu, self.instance)
+ WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
from ganeti import objects
from ganeti import pathutils
from ganeti import utils
-from ganeti.cmdlib.common import _AnnotateDiskParams, \
- _ComputeIPolicyInstanceViolation
+from ganeti.cmdlib.common import AnnotateDiskParams, \
+ ComputeIPolicyInstanceViolation
-def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
- minmem, maxmem, vcpus, nics, disk_template, disks,
- bep, hvp, hypervisor_name, tags):
+def BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
+ minmem, maxmem, vcpus, nics, disk_template, disks,
+ bep, hvp, hypervisor_name, tags):
"""Builds instance related env variables for hooks
This builds the hook environment from individual variables.
return env
-def _BuildInstanceHookEnvByObject(lu, instance, override=None):
+def BuildInstanceHookEnvByObject(lu, instance, override=None):
"""Builds instance related env variables for hooks from an object.
@type lu: L{LogicalUnit}
"maxmem": bep[constants.BE_MAXMEM],
"minmem": bep[constants.BE_MINMEM],
"vcpus": bep[constants.BE_VCPUS],
- "nics": _NICListToTuple(lu, instance.nics),
+ "nics": NICListToTuple(lu, instance.nics),
"disk_template": instance.disk_template,
"disks": [(disk.name, disk.size, disk.mode)
for disk in instance.disks],
}
if override:
args.update(override)
- return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
+ return BuildInstanceHookEnv(**args) # pylint: disable=W0142
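# Most LUs touched by this patch use the object-based wrapper the same way;
# a representative BuildHooksEnv sketch (the extra keys vary per LU):
 def BuildHooksEnv(self):
   env = {
     "FORCE": self.op.force,
   }
   env.update(BuildInstanceHookEnvByObject(self, self.instance))
   return env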
-def _GetClusterDomainSecret():
+def GetClusterDomainSecret():
"""Reads the cluster domain secret.
"""
strict=True)
-def _CheckNodeNotDrained(lu, node):
+def CheckNodeNotDrained(lu, node):
"""Ensure that a given node is not drained.
@param lu: the LU on behalf of which we make the check
errors.ECODE_STATE)
-def _CheckNodeVmCapable(lu, node):
+def CheckNodeVmCapable(lu, node):
"""Ensure that a given node is vm capable.
@param lu: the LU on behalf of which we make the check
errors.ECODE_STATE)
-def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
+def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
"""Utility function to remove an instance.
"""
logging.info("Removing block devices for instance %s", instance.name)
- if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
+ if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
if not ignore_failures:
raise errors.OpExecError("Can't remove instance's disks")
feedback_fn("Warning: can't remove instance's disks")
lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
-def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
+def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
"""Remove all disks for an instance.
This abstracts away some work from `AddInstance()` and
all_result = True
ports_to_release = set()
- anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
+ anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
for (idx, device) in enumerate(anno_disks):
if target_node:
edata = [(target_node, device)]
return all_result
-def _NICToTuple(lu, nic):
+def NICToTuple(lu, nic):
"""Build a tupple of nic information.
@type lu: L{LogicalUnit}
return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)
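# The return statement above fixes the tuple layout; consumers unpack it in
# that order (illustrative only):
 (name, uuid, ip, mac, mode, link, network, netinfo) = NICToTuple(lu, nic)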
-def _NICListToTuple(lu, nics):
+def NICListToTuple(lu, nics):
"""Build a list of nic information tuples.
This list is suitable to be passed to _BuildInstanceHookEnv or as a return
"""
hooks_nics = []
for nic in nics:
- hooks_nics.append(_NICToTuple(lu, nic))
+ hooks_nics.append(NICToTuple(lu, nic))
return hooks_nics
-def _CopyLockList(names):
+def CopyLockList(names):
"""Makes a copy of a list of lock names.
Handles L{locking.ALL_SET} correctly.
return names[:]
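# Its only role in this patch is copying the node locks into the resource
# lock level, as in the DeclareLocks hunks:
 self.needed_locks[locking.LEVEL_NODE_RES] = \
   CopyLockList(self.needed_locks[locking.LEVEL_NODE])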
-def _ReleaseLocks(lu, level, names=None, keep=None):
+def ReleaseLocks(lu, level, names=None, keep=None):
"""Releases locks owned by an LU.
@type lu: L{LogicalUnit}
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
target_group, cfg,
- _compute_fn=_ComputeIPolicyInstanceViolation):
+ _compute_fn=ComputeIPolicyInstanceViolation):
"""Compute if instance meets the specs of the new target group.
@param ipolicy: The ipolicy to verify
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@param _compute_fn: The function to verify ipolicy (unittest only)
- @see: L{ganeti.cmdlib.common._ComputeIPolicySpecViolation}
+ @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}
"""
if current_group == target_group:
return _compute_fn(ipolicy, instance, cfg)
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
- _compute_fn=_ComputeIPolicyNodeViolation):
+def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
+ _compute_fn=_ComputeIPolicyNodeViolation):
"""Checks that the target node is correct in terms of instance policy.
@param ipolicy: The ipolicy to verify
@param cfg: Cluster configuration
@param ignore: Ignore violations of the ipolicy
@param _compute_fn: The function to verify ipolicy (unittest only)
- @see: L{ganeti.cmdlib.common._ComputeIPolicySpecViolation}
+ @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}
"""
primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
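# Callers in this patch pair the check with CalculateGroupIPolicy; a condensed
# sketch of the usual sequence:
 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                         group_info)
 CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                        ignore=self.ignore_ipolicy)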
-def _GetInstanceInfoText(instance):
+def GetInstanceInfoText(instance):
"""Compute that text that should be added to the disk's metadata.
"""
return "originstname+%s" % instance.name
-def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
+def CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
"""Checks if a node has enough free memory.
This function checks if a given node has the needed amount of free
return free_mem
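# As used in the startup and migration hunks above; the verified amount is
# returned so callers can keep it around:
 free_mem = CheckNodeFreeMemory(self, instance.primary_node,
                                "starting instance %s" % instance.name,
                                bep[constants.BE_MINMEM],
                                instance.hypervisor)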
-def _CheckInstanceBridgesExist(lu, instance, node=None):
+def CheckInstanceBridgesExist(lu, instance, node=None):
"""Check that the brigdes needed by an instance exist.
"""
if node is None:
node = instance.primary_node
- _CheckNicsBridgesExist(lu, instance.nics, node)
+ CheckNicsBridgesExist(lu, instance.nics, node)
-def _CheckNicsBridgesExist(lu, target_nics, target_node):
+def CheckNicsBridgesExist(lu, target_nics, target_node):
"""Check that the brigdes needed by a list of nics exist.
"""
target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
-def _CheckNodeHasOS(lu, node, os_name, force_variant):
+def CheckNodeHasOS(lu, node, os_name, force_variant):
"""Ensure that a node supports a given OS.
@param lu: the LU on behalf of which we make the check
from ganeti import qlang
from ganeti import query
from ganeti import utils
-from ganeti.cmdlib.base import NoHooksLU, _QueryBase
-from ganeti.cmdlib.common import _GetWantedNodes, _SupportsOob
+from ganeti.cmdlib.base import NoHooksLU, QueryBase
+from ganeti.cmdlib.common import GetWantedNodes, SupportsOob
class LUOobCommand(NoHooksLU):
"""
if self.op.node_names:
- self.op.node_names = _GetWantedNodes(self, self.op.node_names)
+ self.op.node_names = GetWantedNodes(self, self.op.node_names)
lock_names = self.op.node_names
else:
lock_names = locking.ALL_SET
if (self.op.command in self._SKIP_MASTER and
self.master_node in self.op.node_names):
master_node_obj = self.cfg.GetNodeInfo(self.master_node)
- master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
+ master_oob_handler = SupportsOob(self.cfg, master_node_obj)
if master_oob_handler:
additional_text = ("run '%s %s %s' if you want to operate on the"
node_entry = [(constants.RS_NORMAL, node.name)]
ret.append(node_entry)
- oob_program = _SupportsOob(self.cfg, node)
+ oob_program = SupportsOob(self.cfg, node)
if not oob_program:
node_entry.append((constants.RS_UNAVAIL, None))
utils.CommaJoin(errs))
-class _ExtStorageQuery(_QueryBase):
+class ExtStorageQuery(QueryBase):
FIELDS = query.EXTSTORAGE_FIELDS
def ExpandNames(self, lu):
REQ_BGL = False
def CheckArguments(self):
- self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.eq = ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
self.op.output_fields, False)
def ExpandNames(self):
def ExpandNames(self):
if self.op.nodes:
- self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+ self.op.nodes = GetWantedNodes(self, self.op.nodes)
self.needed_locks = {
locking.LEVEL_NODE: self.op.nodes,
from ganeti import qlang
from ganeti import query
from ganeti import utils
-from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase
-from ganeti.cmdlib.common import _ShareAll, _CheckNodeGroupInstances
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase
+from ganeti.cmdlib.common import ShareAll, CheckNodeGroupInstances
def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
self.cfg.Update(self.network, feedback_fn)
-class _NetworkQuery(_QueryBase):
+class NetworkQuery(QueryBase):
FIELDS = query.NETWORK_FIELDS
def ExpandNames(self, lu):
lu.needed_locks = {}
- lu.share_locks = _ShareAll()
+ lu.share_locks = ShareAll()
self.do_locking = self.use_locking
REQ_BGL = False
def CheckArguments(self):
- self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.nq = NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
self.op.output_fields, self.op.use_locking)
def ExpandNames(self):
# Check if locked instances are still correct
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
if self.op.conflicts_check:
- _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+ CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
self.netparams = {
constants.NIC_MODE: self.network_mode,
# Check if locked instances are still correct
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
- _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+ CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
self.group = self.cfg.GetNodeGroup(self.group_uuid)
self.connected = True
from ganeti import utils
from ganeti.masterd import iallocator
-from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase, \
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
ResultWithJobs
-from ganeti.cmdlib.common import _CheckParamsNotGlobal, \
- _MergeAndVerifyHvState, _MergeAndVerifyDiskState, \
- _IsExclusiveStorageEnabledNode, _CheckNodePVs, \
- _RedistributeAncillaryFiles, _ExpandNodeName, _ShareAll, _SupportsOob, \
- _CheckInstanceState, INSTANCE_DOWN, _GetUpdatedParams, \
- _AdjustCandidatePool, _CheckIAllocatorOrNode, _LoadNodeEvacResult, \
- _GetWantedNodes, _MapInstanceDisksToNodes, _RunPostHook, \
- _FindFaultyInstanceDisks
+from ganeti.cmdlib.common import CheckParamsNotGlobal, \
+ MergeAndVerifyHvState, MergeAndVerifyDiskState, \
+ IsExclusiveStorageEnabledNode, CheckNodePVs, \
+ RedistributeAncillaryFiles, ExpandNodeName, ShareAll, SupportsOob, \
+ CheckInstanceState, INSTANCE_DOWN, GetUpdatedParams, \
+ AdjustCandidatePool, CheckIAllocatorOrNode, LoadNodeEvacResult, \
+ GetWantedNodes, MapInstanceDisksToNodes, RunPostHook, \
+ FindFaultyInstanceDisks
def _DecideSelfPromotion(lu, exceptions=None):
if self.op.ndparams:
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
- _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
- "node", "cluster or group")
+ CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+ "node", "cluster or group")
if self.op.hv_state:
- self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+ self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
if self.op.disk_state:
- self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+ self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
# TODO: If we need to have multiple DnsOnlyRunner we probably should make
# it a property on the base class.
vg_name = cfg.GetVGName()
if vg_name is not None:
vparams = {constants.NV_PVLIST: [vg_name]}
- excl_stor = _IsExclusiveStorageEnabledNode(cfg, self.new_node)
+ excl_stor = IsExclusiveStorageEnabledNode(cfg, self.new_node)
cname = self.cfg.GetClusterName()
result = rpcrunner.call_node_verify_light([node], vparams, cname)[node]
- (errmsgs, _) = _CheckNodePVs(result.payload, excl_stor)
+ (errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
if errmsgs:
raise errors.OpPrereqError("Checks on node PVs failed: %s" %
"; ".join(errmsgs), errors.ECODE_ENVIRON)
raise errors.OpExecError("ssh/hostname verification failed")
if self.op.readd:
- _RedistributeAncillaryFiles(self)
+ RedistributeAncillaryFiles(self)
self.context.ReaddNode(new_node)
# make sure we redistribute the config
self.cfg.Update(new_node, feedback_fn)
self.LogWarning("Node failed to demote itself from master"
" candidate status: %s" % msg)
else:
- _RedistributeAncillaryFiles(self, additional_nodes=[node],
- additional_vm=self.op.vm_capable)
+ RedistributeAncillaryFiles(self, additional_nodes=[node],
+ additional_vm=self.op.vm_capable)
self.context.AddNode(new_node, self.proc.GetECId())
_FLAGS = ["master_candidate", "drained", "offline"]
def CheckArguments(self):
- self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
self.op.master_capable, self.op.vm_capable,
self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
# Get all locks except nodes in shared mode; they are not used for anything
# but read-only access
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.share_locks[locking.LEVEL_NODE] = 0
self.share_locks[locking.LEVEL_NODE_RES] = 0
self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
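# Editor's note (illustrative sketch; ShareAll's exact return value is an
# assumption here): ShareAll() is understood to return a dict mapping every
# locking level to 1 (shared); individual levels are then forced back to 0
# (exclusive) as the LU above does. A simplified stand-in with hypothetical
# level names:

LEVELS = ["cluster", "nodegroup", "node", "node-res", "node-alloc", "instance"]

def share_all():
    # 1 means shared, 0 means exclusive in this simplified model.
    return dict.fromkeys(LEVELS, 1)

share_locks = share_all()
share_locks["node"] = 0       # keep node locks exclusive, mirroring the LU above
share_locks["node-res"] = 0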
# away from the respective state, as only real changes are kept
# TODO: We might query the real power state if it supports OOB
- if _SupportsOob(self.cfg, node):
+ if SupportsOob(self.cfg, node):
if self.op.offline is False and not (node.powered or
self.op.powered is True):
raise errors.OpPrereqError(("Node %s needs to be turned on before its"
# On online nodes, check that no instances are running, and that
# the node has the new ip and we can reach it.
for instance in affected_instances.values():
- _CheckInstanceState(self, instance, INSTANCE_DOWN,
- msg="cannot change secondary ip")
+ CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot change secondary ip")
_CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
if master.name != node.name:
errors.ECODE_ENVIRON)
if self.op.ndparams:
- new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
+ new_ndparams = GetUpdatedParams(self.node.ndparams, self.op.ndparams)
utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
- _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
- "node", "cluster or group")
+ CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+ "node", "cluster or group")
self.new_ndparams = new_ndparams
if self.op.hv_state:
- self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
- self.node.hv_state_static)
+ self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+ self.node.hv_state_static)
if self.op.disk_state:
self.new_disk_state = \
- _MergeAndVerifyDiskState(self.op.disk_state,
- self.node.disk_state_static)
+ MergeAndVerifyDiskState(self.op.disk_state,
+ self.node.disk_state_static)
def Exec(self, feedback_fn):
"""Modifies a node.
# we locked all nodes, we adjust the CP before updating this node
if self.lock_all:
- _AdjustCandidatePool(self, [node.name])
+ AdjustCandidatePool(self, [node.name])
if self.op.secondary_ip:
node.secondary_ip = self.op.secondary_ip
REQ_BGL = False
def CheckArguments(self):
- self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
raise errors.OpPrereqError("The node is the master and the force"
" parameter was not set",
constants.IALLOCATOR_NEVAC_MODES)
def CheckArguments(self):
- _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+ CheckIAllocatorOrNode(self, "iallocator", "remote_node")
def ExpandNames(self):
- self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
if self.op.remote_node is not None:
- self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+ self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
assert self.op.remote_node
if self.op.remote_node == self.op.node_name:
errors.ECODE_INVAL)
# Declare locks
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [],
(self.op.iallocator, ial.info),
errors.ECODE_NORES)
- jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
+ jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
elif self.op.remote_node is not None:
assert self.op.mode == constants.NODE_EVAC_SEC
pass
def ExpandNames(self):
- self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_NODE: [self.op.node_name],
}
REQ_BGL = False
def CheckArguments(self):
- self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
storage_type = self.op.storage_type
(self.op.name, self.op.node_name))
-class _NodeQuery(_QueryBase):
+class NodeQuery(QueryBase):
FIELDS = query.NODE_FIELDS
def ExpandNames(self, lu):
lu.needed_locks = {}
- lu.share_locks = _ShareAll()
+ lu.share_locks = ShareAll()
if self.names:
- self.wanted = _GetWantedNodes(lu, self.names)
+ self.wanted = GetWantedNodes(lu, self.names)
else:
self.wanted = locking.ALL_SET
node_to_secondary = None
if query.NQ_OOB in self.requested_data:
- oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
+ oob_support = dict((name, bool(SupportsOob(lu.cfg, node)))
for name, node in all_info.iteritems())
else:
oob_support = None
REQ_BGL = False
def CheckArguments(self):
- self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.nq = NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
self.op.output_fields, self.op.use_locking)
def ExpandNames(self):
selected=self.op.output_fields)
def ExpandNames(self):
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
if self.op.nodes:
self.needed_locks = {
- locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
+ locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
}
else:
self.needed_locks = {
volumes = self.rpc.call_node_volumes(nodenames)
ilist = self.cfg.GetAllInstancesInfo()
- vol2inst = _MapInstanceDisksToNodes(ilist.values())
+ vol2inst = MapInstanceDisksToNodes(ilist.values())
output = []
for node in nodenames:
selected=self.op.output_fields)
def ExpandNames(self):
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
if self.op.nodes:
self.needed_locks = {
- locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
+ locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
}
else:
self.needed_locks = {
Any errors are signaled by raising errors.OpPrereqError.
"""
- self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
node = self.cfg.GetNodeInfo(self.op.node_name)
assert node is not None
"Not owning BGL"
# Promote nodes to master candidate as needed
- _AdjustCandidatePool(self, exceptions=[node.name])
+ AdjustCandidatePool(self, exceptions=[node.name])
self.context.RemoveNode(node.name)
# Run post hooks on the node before it's removed
- _RunPostHook(self, node.name)
+ RunPostHook(self, node.name)
result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
msg = result.fail_msg
constants.ETC_HOSTS_REMOVE,
node.name, None)
result.Raise("Can't update hosts file with new host data")
- _RedistributeAncillaryFiles(self)
+ RedistributeAncillaryFiles(self)
class LURepairNodeStorage(NoHooksLU):
REQ_BGL = False
def CheckArguments(self):
- self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
storage_type = self.op.storage_type
def _CheckFaultyDisks(self, instance, node_name):
"""Ensure faulty disks abort the opcode or at least warn."""
try:
- if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
- node_name, True):
+ if FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
+ node_name, True):
raise errors.OpPrereqError("Instance '%s' has faulty disks on"
" node '%s'" % (instance.name, node_name),
errors.ECODE_STATE)
from ganeti import locking
from ganeti import qlang
from ganeti import query
-from ganeti.cmdlib.base import NoHooksLU, _QueryBase
+from ganeti.cmdlib.base import QueryBase, NoHooksLU
-class _OsQuery(_QueryBase):
+class OsQuery(QueryBase):
FIELDS = query.OS_FIELDS
def ExpandNames(self, lu):
return status_filter
def CheckArguments(self):
- self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
+ self.oq = OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
self.op.output_fields, False)
def ExpandNames(self):
from ganeti import constants
from ganeti import errors
from ganeti import query
-from ganeti.cmdlib.backup import _ExportQuery
+from ganeti.cmdlib.backup import ExportQuery
from ganeti.cmdlib.base import NoHooksLU
-from ganeti.cmdlib.cluster import _ClusterQuery
-from ganeti.cmdlib.group import _GroupQuery
-from ganeti.cmdlib.instance_query import _InstanceQuery
-from ganeti.cmdlib.misc import _ExtStorageQuery
-from ganeti.cmdlib.network import _NetworkQuery
-from ganeti.cmdlib.node import _NodeQuery
-from ganeti.cmdlib.operating_system import _OsQuery
+from ganeti.cmdlib.cluster import ClusterQuery
+from ganeti.cmdlib.group import GroupQuery
+from ganeti.cmdlib.instance_query import InstanceQuery
+from ganeti.cmdlib.misc import ExtStorageQuery
+from ganeti.cmdlib.network import NetworkQuery
+from ganeti.cmdlib.node import NodeQuery
+from ganeti.cmdlib.operating_system import OsQuery
#: Query type implementations
_QUERY_IMPL = {
- constants.QR_CLUSTER: _ClusterQuery,
- constants.QR_INSTANCE: _InstanceQuery,
- constants.QR_NODE: _NodeQuery,
- constants.QR_GROUP: _GroupQuery,
- constants.QR_NETWORK: _NetworkQuery,
- constants.QR_OS: _OsQuery,
- constants.QR_EXTSTORAGE: _ExtStorageQuery,
- constants.QR_EXPORT: _ExportQuery,
+ constants.QR_CLUSTER: ClusterQuery,
+ constants.QR_INSTANCE: InstanceQuery,
+ constants.QR_NODE: NodeQuery,
+ constants.QR_GROUP: GroupQuery,
+ constants.QR_NETWORK: NetworkQuery,
+ constants.QR_OS: OsQuery,
+ constants.QR_EXTSTORAGE: ExtStorageQuery,
+ constants.QR_EXPORT: ExportQuery,
}
assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
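# Editor's note (illustrative sketch): the _QUERY_IMPL table above maps each
# query resource constant to its now-public QueryBase subclass. A hypothetical
# lookup helper (not necessarily the one Ganeti ships) would dispatch like so:

def _get_query_impl(query_impl, name):
    # Raise a clear error for unknown resources instead of a bare KeyError.
    try:
        return query_impl[name]
    except KeyError:
        raise ValueError("Unknown query resource '%s'" % name)

# e.g. _get_query_impl(_QUERY_IMPL, constants.QR_NODE) would yield NodeQuery.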
from ganeti import objects
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU
-from ganeti.cmdlib.common import _ExpandNodeName, _ExpandInstanceName, \
- _ShareAll
+from ganeti.cmdlib.common import ExpandNodeName, ExpandInstanceName, ShareAll
class TagsLU(NoHooksLU): # pylint: disable=W0223
self.needed_locks = {}
if self.op.kind == constants.TAG_NODE:
- self.op.name = _ExpandNodeName(self.cfg, self.op.name)
+ self.op.name = ExpandNodeName(self.cfg, self.op.name)
lock_level = locking.LEVEL_NODE
lock_name = self.op.name
elif self.op.kind == constants.TAG_INSTANCE:
- self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
+ self.op.name = ExpandInstanceName(self.cfg, self.op.name)
lock_level = locking.LEVEL_INSTANCE
lock_name = self.op.name
elif self.op.kind == constants.TAG_NODEGROUP:
TagsLU.ExpandNames(self)
# Share locks as this is only a read operation
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
def Exec(self, feedback_fn):
"""Returns the tag list.
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import NoHooksLU
-from ganeti.cmdlib.common import _ExpandInstanceName, _GetWantedNodes, \
- _GetWantedInstances
+from ganeti.cmdlib.common import ExpandInstanceName, GetWantedNodes, \
+ GetWantedInstances
class LUTestDelay(NoHooksLU):
# _GetWantedNodes can be used here, but is not always appropriate to use
# this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
# more information.
- self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
+ self.op.on_nodes = GetWantedNodes(self, self.op.on_nodes)
self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
def _TestDelay(self):
if self.op.hypervisor is None:
self.op.hypervisor = self.cfg.GetHypervisorType()
elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
- fname = _ExpandInstanceName(self.cfg, self.op.name)
+ fname = ExpandInstanceName(self.cfg, self.op.name)
self.op.name = fname
self.relocate_from = \
list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
constants.IALLOCATOR_MODE_NODE_EVAC):
if not self.op.instances:
raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
- self.op.instances = _GetWantedInstances(self, self.op.instances)
+ self.op.instances = GetWantedInstances(self, self.op.instances)
else:
raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
self.op.mode, errors.ECODE_INVAL)
op = OpTest()
lu = TestLU(op)
- c_i = lambda: common._CheckIAllocatorOrNode(lu, "iallocator", "node")
+ c_i = lambda: common.CheckIAllocatorOrNode(lu, "iallocator", "node")
# Neither node nor iallocator given
for n in (None, []):
assert iallocator._NEVAC_RESULT(alloc_result)
lu = _FakeLU()
- result = common._LoadNodeEvacResult(lu, alloc_result,
- early_release, use_nodes)
+ result = common.LoadNodeEvacResult(lu, alloc_result,
+ early_release, use_nodes)
if moved:
(_, (info_args, )) = lu.info_log.pop(0)
assert iallocator._NEVAC_RESULT(alloc_result)
lu = _FakeLU()
- self.assertRaises(errors.OpExecError, common._LoadNodeEvacResult,
+ self.assertRaises(errors.OpExecError, common.LoadNodeEvacResult,
lu, alloc_result, False, False)
self.assertFalse(lu.info_log)
(_, (args, )) = lu.warning_log.pop(0)
class TestHvStateHelper(unittest.TestCase):
def testWithoutOpData(self):
- self.assertEqual(common._MergeAndVerifyHvState(None, NotImplemented),
+ self.assertEqual(common.MergeAndVerifyHvState(None, NotImplemented),
None)
def testWithoutOldData(self):
constants.HVST_MEMORY_TOTAL: 4096,
},
}
- self.assertEqual(common._MergeAndVerifyHvState(new, None), new)
+ self.assertEqual(common.MergeAndVerifyHvState(new, None), new)
def testWithWrongHv(self):
new = {
constants.HVST_MEMORY_TOTAL: 4096,
},
}
- self.assertRaises(errors.OpPrereqError, common._MergeAndVerifyHvState,
+ self.assertRaises(errors.OpPrereqError, common.MergeAndVerifyHvState,
new, None)
class TestDiskStateHelper(unittest.TestCase):
def testWithoutOpData(self):
- self.assertEqual(common._MergeAndVerifyDiskState(None, NotImplemented),
+ self.assertEqual(common.MergeAndVerifyDiskState(None, NotImplemented),
None)
def testWithoutOldData(self):
},
},
}
- self.assertEqual(common._MergeAndVerifyDiskState(new, None), new)
+ self.assertEqual(common.MergeAndVerifyDiskState(new, None), new)
def testWithWrongStorageType(self):
new = {
},
},
}
- self.assertRaises(errors.OpPrereqError, common._MergeAndVerifyDiskState,
+ self.assertRaises(errors.OpPrereqError, common.MergeAndVerifyDiskState,
new, None)
def test(self):
compute_fn = _ValidateComputeMinMaxSpec
- ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
- [1024], 1, constants.DT_PLAIN,
- _compute_fn=compute_fn)
+ ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+ [1024], 1, constants.DT_PLAIN,
+ _compute_fn=compute_fn)
self.assertEqual(ret, [])
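# Editor's note (illustrative sketch; argument names are inferred from these
# tests, not from the function's own docstring): the long positional call above
# reads as ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
# nic_count, disk_sizes, spindle_use, disk_template, _compute_fn=...).
# A thin, test-local keyword wrapper would keep such call sites legible:

def _spec_violation(ipolicy, mem_size=1024, cpu_count=1, disk_count=1,
                    nic_count=1, disk_sizes=(1024,), spindle_use=1,
                    disk_template=constants.DT_PLAIN, compute_fn=None):
    # Only forward _compute_fn when a stub is supplied.
    kwargs = {"_compute_fn": compute_fn} if compute_fn else {}
    return common.ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count,
                                              disk_count, nic_count,
                                              list(disk_sizes), spindle_use,
                                              disk_template, **kwargs)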
def testDiskFull(self):
compute_fn = _NoDiskComputeMinMaxSpec
- ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
- [1024], 1, constants.DT_PLAIN,
- _compute_fn=compute_fn)
+ ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+ [1024], 1, constants.DT_PLAIN,
+ _compute_fn=compute_fn)
self.assertEqual(ret, [constants.ISPEC_DISK_COUNT])
def testDiskLess(self):
compute_fn = _NoDiskComputeMinMaxSpec
- ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
- [1024], 1, constants.DT_DISKLESS,
- _compute_fn=compute_fn)
+ ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+ [1024], 1, constants.DT_DISKLESS,
+ _compute_fn=compute_fn)
self.assertEqual(ret, [])
def testWrongTemplates(self):
compute_fn = _ValidateComputeMinMaxSpec
- ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
- [1024], 1, constants.DT_DRBD8,
- _compute_fn=compute_fn)
+ ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+ [1024], 1, constants.DT_DRBD8,
+ _compute_fn=compute_fn)
self.assertEqual(len(ret), 1)
self.assertTrue("Disk template" in ret[0])
def testInvalidArguments(self):
- self.assertRaises(AssertionError, common._ComputeIPolicySpecViolation,
+ self.assertRaises(AssertionError, common.ComputeIPolicySpecViolation,
self._MICRO_IPOL, 1024, 1, 1, 1, [], 1,
constants.DT_PLAIN,)
def testInvalidSpec(self):
spec = _SpecWrapper([None, False, "foo", None, "bar", None])
compute_fn = spec.ComputeMinMaxSpec
- ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
- [1024], 1, constants.DT_PLAIN,
- _compute_fn=compute_fn)
+ ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+ [1024], 1, constants.DT_PLAIN,
+ _compute_fn=compute_fn)
self.assertEqual(ret, ["foo", "bar"])
self.assertFalse(spec.spec)
constants.IPOLICY_DTS: [disk_template],
}
def AssertComputeViolation(ipolicy, violations):
- ret = common._ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count,
- disk_count, nic_count,
- disk_sizes, spindle_use,
- disk_template)
+ ret = common.ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count,
+ disk_count, nic_count,
+ disk_sizes, spindle_use,
+ disk_template)
self.assertEqual(len(ret), violations)
AssertComputeViolation(ipolicy1, 0)
disk_template=constants.DT_PLAIN)
stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
constants.DT_PLAIN)
- ret = common._ComputeIPolicyInstanceViolation(NotImplemented, instance,
- cfg, _compute_fn=stub)
+ ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance,
+ cfg, _compute_fn=stub)
self.assertEqual(ret, [])
instance2 = objects.Instance(beparams={}, disks=disks, nics=[],
disk_template=constants.DT_PLAIN)
- ret = common._ComputeIPolicyInstanceViolation(NotImplemented, instance2,
- cfg, _compute_fn=stub)
+ ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance2,
+ cfg, _compute_fn=stub)
self.assertEqual(ret, [])
def testNoViolation(self):
compute_recoder = _CallRecorder(return_value=[])
- instance._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
- self.target_node, NotImplemented,
- _compute_fn=compute_recoder)
+ instance.CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
+ self.target_node, NotImplemented,
+ _compute_fn=compute_recoder)
self.assertTrue(compute_recoder.called)
self.assertEqual(self.lu.warning_log, [])
def testNoIgnore(self):
compute_recoder = _CallRecorder(return_value=["mem_size not in range"])
- self.assertRaises(errors.OpPrereqError, instance._CheckTargetNodeIPolicy,
+ self.assertRaises(errors.OpPrereqError, instance.CheckTargetNodeIPolicy,
self.lu, NotImplemented, self.instance,
self.target_node, NotImplemented,
_compute_fn=compute_recoder)
def testIgnoreViolation(self):
compute_recoder = _CallRecorder(return_value=["mem_size not in range"])
- instance._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
+ instance.CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
self.target_node, NotImplemented,
ignore=True, _compute_fn=compute_recoder)
self.assertTrue(compute_recoder.called)
def testEmptyContainer(self):
container = []
chgdesc = []
- instance.ApplyContainerMods("test", container, chgdesc, [], None, None,
+ instance._ApplyContainerMods("test", container, chgdesc, [], None, None,
None)
self.assertEqual(container, [])
self.assertEqual(chgdesc, [])
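# Editor's note (simplified, self-contained sketch of the container-mods
# semantics these tests exercise; it is not the real implementation and omits
# index validation and change-description reporting): each mod is
# (action, index, value), where index -1 means "append at the end".

def apply_mods(container, mods):
    for action, idx, value in mods:
        if action == "add":
            if idx == -1:
                container.append(value)
            else:
                container.insert(idx, value)
        elif action == "remove":
            container.pop(idx)
        elif action == "modify":
            container[idx] = value
    return container

assert apply_mods([], [("add", -1, "Hello"), ("add", 0, "Start")]) == \
       ["Start", "Hello"]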
def testAdd(self):
container = []
chgdesc = []
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_ADD, -1, "Hello"),
(constants.DDM_ADD, -1, "World"),
(constants.DDM_ADD, 0, "Start"),
(constants.DDM_ADD, -1, "End"),
], None)
- instance.ApplyContainerMods("test", container, chgdesc, mods,
+ instance._ApplyContainerMods("test", container, chgdesc, mods,
None, None, None)
self.assertEqual(container, ["Start", "Hello", "World", "End"])
self.assertEqual(chgdesc, [])
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_ADD, 0, "zero"),
(constants.DDM_ADD, 3, "Added"),
(constants.DDM_ADD, 5, "four"),
(constants.DDM_ADD, 7, "xyz"),
], None)
- instance.ApplyContainerMods("test", container, chgdesc, mods,
+ instance._ApplyContainerMods("test", container, chgdesc, mods,
None, None, None)
self.assertEqual(container,
["zero", "Start", "Hello", "Added", "World", "four",
self.assertEqual(chgdesc, [])
for idx in [-2, len(container) + 1]:
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_ADD, idx, "error"),
], None)
- self.assertRaises(IndexError, instance.ApplyContainerMods,
+ self.assertRaises(IndexError, instance._ApplyContainerMods,
"test", container, None, mods, None, None, None)
def testRemoveError(self):
for idx in [0, 1, 2, 100, -1, -4]:
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_REMOVE, idx, None),
], None)
- self.assertRaises(IndexError, instance.ApplyContainerMods,
+ self.assertRaises(IndexError, instance._ApplyContainerMods,
"test", [], None, mods, None, None, None)
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_REMOVE, 0, object()),
], None)
- self.assertRaises(AssertionError, instance.ApplyContainerMods,
+ self.assertRaises(AssertionError, instance._ApplyContainerMods,
"test", [""], None, mods, None, None, None)
def testAddError(self):
for idx in range(-100, -1) + [100]:
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_ADD, idx, None),
], None)
- self.assertRaises(IndexError, instance.ApplyContainerMods,
+ self.assertRaises(IndexError, instance._ApplyContainerMods,
"test", [], None, mods, None, None, None)
def testRemove(self):
container = ["item 1", "item 2"]
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_ADD, -1, "aaa"),
(constants.DDM_REMOVE, -1, None),
(constants.DDM_ADD, -1, "bbb"),
], None)
chgdesc = []
- instance.ApplyContainerMods("test", container, chgdesc, mods,
+ instance._ApplyContainerMods("test", container, chgdesc, mods,
None, None, None)
self.assertEqual(container, ["item 1", "item 2", "bbb"])
self.assertEqual(chgdesc, [
def testModify(self):
container = ["item 1", "item 2"]
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_MODIFY, -1, "a"),
(constants.DDM_MODIFY, 0, "b"),
(constants.DDM_MODIFY, 1, "c"),
], None)
chgdesc = []
- instance.ApplyContainerMods("test", container, chgdesc, mods,
+ instance._ApplyContainerMods("test", container, chgdesc, mods,
None, None, None)
self.assertEqual(container, ["item 1", "item 2"])
self.assertEqual(chgdesc, [])
for idx in [-2, len(container) + 1]:
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_MODIFY, idx, "error"),
], None)
- self.assertRaises(IndexError, instance.ApplyContainerMods,
+ self.assertRaises(IndexError, instance._ApplyContainerMods,
"test", container, None, mods, None, None, None)
class _PrivateData:
def testAddWithCreateFunction(self):
container = []
chgdesc = []
- mods = instance.PrepareContainerMods([
+ mods = instance._PrepareContainerMods([
(constants.DDM_ADD, -1, "Hello"),
(constants.DDM_ADD, -1, "World"),
(constants.DDM_ADD, 0, "Start"),
(constants.DDM_REMOVE, 2, None),
(constants.DDM_ADD, 1, "More"),
], self._PrivateData)
- instance.ApplyContainerMods("test", container, chgdesc, mods,
+ instance._ApplyContainerMods("test", container, chgdesc, mods,
self._CreateTestFn, self._ModifyTestFn,
self._RemoveTestFn)
self.assertEqual(container, [
return copy.deepcopy(constants.DISK_DT_DEFAULTS)
def testWrongDiskTemplate(self):
- gdt = instance._GenerateDiskTemplate
+ gdt = instance.GenerateDiskTemplate
disk_template = "##unknown##"
assert disk_template not in constants.DISK_TEMPLATES
self.GetDiskParams())
def testDiskless(self):
- gdt = instance._GenerateDiskTemplate
+ gdt = instance.GenerateDiskTemplate
result = gdt(self.lu, constants.DT_DISKLESS, "inst27734.example.com",
"node30113.example.com", [], [],
file_driver=NotImplemented,
req_file_storage=NotImplemented,
req_shr_file_storage=NotImplemented):
- gdt = instance._GenerateDiskTemplate
+ gdt = instance.GenerateDiskTemplate
map(lambda params: utils.ForceDictType(params,
constants.IDISK_PARAMS_TYPES),
])
def testDrbd8(self):
- gdt = instance._GenerateDiskTemplate
+ gdt = instance.GenerateDiskTemplate
drbd8_defaults = constants.DISK_LD_DEFAULTS[constants.LD_DRBD8]
drbd8_default_metavg = drbd8_defaults[constants.LDP_DEFAULT_METAVG]
disk_template=constants.DT_PLAIN,
disks=disks)
- self.assertRaises(errors.OpExecError, instance._WipeDisks, lu, inst)
+ self.assertRaises(errors.OpExecError, instance.WipeDisks, lu, inst)
def _FailingWipeCb(self, (disk, _), offset, size):
# This should only ever be called for the first disk
disks=disks)
try:
- instance._WipeDisks(lu, inst)
+ instance.WipeDisks(lu, inst)
except errors.OpExecError, err:
self.assertTrue(str(err), "Could not wipe disk 0 at offset 0 ")
else:
(lu, inst, pauset, progresst) = self._PrepareWipeTest(0, disks)
- instance._WipeDisks(lu, inst)
+ instance.WipeDisks(lu, inst)
self.assertEqual(pauset.history, [
("disk0", 1024, True),
self._PrepareWipeTest(start_offset, disks)
# Test start offset with only one disk
- instance._WipeDisks(lu, inst,
- disks=[(1, disks[1], start_offset)])
+ instance.WipeDisks(lu, inst,
+ disks=[(1, disks[1], start_offset)])
# Only the second disk may have been paused and wiped
self.assertEqual(pauset.history, [
class TestCopyLockList(unittest.TestCase):
def test(self):
- self.assertEqual(instance._CopyLockList([]), [])
- self.assertEqual(instance._CopyLockList(None), None)
- self.assertEqual(instance._CopyLockList(locking.ALL_SET), locking.ALL_SET)
+ self.assertEqual(instance.CopyLockList([]), [])
+ self.assertEqual(instance.CopyLockList(None), None)
+ self.assertEqual(instance.CopyLockList(locking.ALL_SET), locking.ALL_SET)
names = ["foo", "bar"]
- output = instance._CopyLockList(names)
+ output = instance.CopyLockList(names)
self.assertEqual(names, output)
self.assertNotEqual(id(names), id(output), msg="List was not copied")
}
if not isgroup:
diff_policy[constants.ISPECS_STD] = diff_std
- new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
- group_policy=isgroup)
+ new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
+ group_policy=isgroup)
self.assertTrue(constants.ISPECS_MINMAX in new_policy)
self.assertEqual(new_policy[constants.ISPECS_MINMAX], diff_minmax)
self.assertEqual(new_std[key], old_std[key])
def _TestSet(self, old_policy, diff_policy, isgroup):
- new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
+ new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
group_policy=isgroup)
for key in diff_policy:
self.assertTrue(key in new_policy)
diff_policy = {
constants.IPOLICY_SPINDLE_RATIO: constants.VALUE_DEFAULT,
}
- new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
- group_policy=True)
+ new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
+ group_policy=True)
for key in diff_policy:
self.assertFalse(key in new_policy)
for key in old_policy:
self.assertTrue(key in new_policy)
self.assertEqual(new_policy[key], old_policy[key])
- self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy,
+ self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy,
old_policy, diff_policy, group_policy=False)
def testUnsetEmpty(self):
diff_policy = {
key: constants.VALUE_DEFAULT,
}
- new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
- group_policy=True)
+ new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
+ group_policy=True)
self.assertEqual(new_policy, old_policy)
def _TestInvalidKeys(self, old_policy, isgroup):
INVALID_KEY: 3,
}
invalid_policy = INVALID_DICT
- self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy,
+ self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy,
old_policy, invalid_policy, group_policy=isgroup)
invalid_ispecs = {
constants.ISPECS_MINMAX: [INVALID_DICT],
}
- self.assertRaises(errors.TypeEnforcementError, common._GetUpdatedIPolicy,
+ self.assertRaises(errors.TypeEnforcementError, common.GetUpdatedIPolicy,
old_policy, invalid_ispecs, group_policy=isgroup)
if isgroup:
invalid_for_group = {
constants.ISPECS_STD: constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
}
- self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy,
+ self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy,
old_policy, invalid_for_group, group_policy=isgroup)
good_ispecs = self._OLD_CLUSTER_POLICY[constants.ISPECS_MINMAX]
invalid_ispecs = copy.deepcopy(good_ispecs)
ispec = minmax[key]
ispec[INVALID_KEY] = None
self.assertRaises(errors.TypeEnforcementError,
- common._GetUpdatedIPolicy, old_policy,
+ common.GetUpdatedIPolicy, old_policy,
invalid_policy, group_policy=isgroup)
del ispec[INVALID_KEY]
for par in constants.ISPECS_PARAMETERS:
oldv = ispec[par]
ispec[par] = "this_is_not_good"
self.assertRaises(errors.TypeEnforcementError,
- common._GetUpdatedIPolicy,
+ common.GetUpdatedIPolicy,
old_policy, invalid_policy, group_policy=isgroup)
ispec[par] = oldv
# This is to make sure that no two errors were present during the tests
- common._GetUpdatedIPolicy(old_policy, invalid_policy,
- group_policy=isgroup)
+ common.GetUpdatedIPolicy(old_policy, invalid_policy,
+ group_policy=isgroup)
def testInvalidKeys(self):
self._TestInvalidKeys(self._OLD_GROUP_POLICY, True)
bad_policy = {
par: "invalid_value",
}
- self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy, {},
+ self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy, {},
bad_policy, group_policy=True)
if __name__ == "__main__":