(instance.name, msg), errors.ECODE_STATE)
+def _CheckMinMaxSpecs(name, ipolicy, value):
+ """Checks if value is in the desired range.
+
+ @param name: name of the parameter for which we perform the check
+ @param ipolicy: dictionary containing min, max and std values
+ @param value: actual value that we want to use
+ @return: None if the value is within limits, or a message describing
+ the violated criterion otherwise
+
+ """
+ if value in [None, constants.VALUE_AUTO]:
+ return None
+ max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
+ min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
+ if value > max_v or min_v > value:
+ return ("%s value %s is not in range [%s, %s]" %
+ (name, value, min_v, max_v))
+ return None
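+
+# A minimal sketch of how this check behaves, with hypothetical policy
+# values (not taken from a real cluster):
+#
+#   ipolicy = {constants.ISPECS_MIN: {constants.ISPEC_CPU_COUNT: 1},
+#              constants.ISPECS_MAX: {constants.ISPEC_CPU_COUNT: 8}}
+#   _CheckMinMaxSpecs(constants.ISPEC_CPU_COUNT, ipolicy, 16)
+#
+# returns a "... value 16 is not in range [1, 8]" message, while 4, None or
+# constants.VALUE_AUTO return None; parameters missing from the min/max
+# dicts never fail, as the value itself is used as the fallback limit.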
+
+
+def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
+ nic_count, disk_sizes,
+ _check_spec_fn=_CheckMinMaxSpecs):
+ """Verifies ipolicy against provided specs.
+
+ @type ipolicy: dict
+ @param ipolicy: The ipolicy
+ @type mem_size: int
+ @param mem_size: The memory size
+ @type cpu_count: int
+ @param cpu_count: Used cpu cores
+ @type disk_count: int
+ @param disk_count: Number of disks used
+ @type nic_count: int
+ @param nic_count: Number of nics used
+ @type disk_sizes: list of ints
+ @param disk_sizes: Disk sizes of the disks used (len must match C{disk_count})
+ @param _check_spec_fn: The checking function (unittest only)
+ @return: A list of violations, or an empty list if no violations are found
+
+ """
+ assert disk_count == len(disk_sizes)
+
+ test_settings = [
+ (constants.ISPEC_MEM_SIZE, mem_size),
+ (constants.ISPEC_CPU_COUNT, cpu_count),
+ (constants.ISPEC_DISK_COUNT, disk_count),
+ (constants.ISPEC_NIC_COUNT, nic_count),
+ ] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes)
+
+ return filter(None,
+ (_check_spec_fn(name, ipolicy, value)
+ for (name, value) in test_settings))
+
+
+def _ComputeIPolicyInstanceViolation(ipolicy, instance,
+ _compute_fn=_ComputeIPolicySpecViolation):
+ """Compute if instance meets the specs of ipolicy.
+
+ @type ipolicy: dict
+ @param ipolicy: The ipolicy to verify against
+ @type instance: L{objects.Instance}
+ @param instance: The instance to verify
+ @param _compute_fn: The function to verify ipolicy (unittest only)
+ @see: L{_ComputeIPolicySpecViolation}
+
+ """
+ mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
+ cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
+ disk_count = len(instance.disks)
+ disk_sizes = [disk.size for disk in instance.disks]
+ nic_count = len(instance.nics)
+
+ return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
+ disk_sizes)
+
+
+def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
+ _compute_fn=_ComputeIPolicySpecViolation):
+ """Compute if instance specs meets the specs of ipolicy.
+
+ @type ipolicy: dict
+ @param ipolicy: The ipolicy to verify against
+ @type instance_spec: dict
+ @param instance_spec: The instance spec to verify
+ @param _compute_fn: The function to verify ipolicy (unittest only)
+ @see: L{_ComputeIPolicySpecViolation}
+
+ """
+ mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
+ cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
+ disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
+ disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
+ nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
+
+ return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
+ disk_sizes)
+
+
+def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
+ target_group,
+ _compute_fn=_ComputeIPolicyInstanceViolation):
+ """Compute if instance meets the specs of the new target group.
+
+ @param ipolicy: The ipolicy to verify
+ @param instance: The instance object to verify
+ @param current_group: The current group of the instance
+ @param target_group: The new group of the instance
+ @param _compute_fn: The function to verify ipolicy (unittest only)
+ @see: L{_ComputeIPolicySpecViolation}
+
+ """
+ if current_group == target_group:
+ return []
+ else:
+ return _compute_fn(ipolicy, instance)
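+
+# Note that a move within a single group (current_group == target_group)
+# is accepted without re-checking the policy; only moves to a different
+# group are validated against that group's ipolicy.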
+
+
+def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
+ _compute_fn=_ComputeIPolicyNodeViolation):
+ """Checks that the target node is correct in terms of instance policy.
+
+ @param ipolicy: The ipolicy to verify
+ @param instance: The instance object to verify
+ @param node: The new node the instance will be relocated to
+ @param ignore: Ignore violations of the ipolicy
+ @param _compute_fn: The function to verify ipolicy (unittest only)
+ @see: L{_ComputeIPolicySpecViolation}
+
+ """
+ primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
+ res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
+
+ if res:
+ msg = ("Instance does not meet target node group's (%s) instance"
+ " policy: %s") % (node.group, utils.CommaJoin(res))
+ if ignore:
+ lu.LogWarning(msg)
+ else:
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
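+
+# Typical call site (mirroring the relocation/migration code below): with
+# ignore=False a violation aborts the operation via OpPrereqError, while
+# ignore=True, e.g. driven by an opcode's ignore_ipolicy flag, only logs
+# a warning:
+#
+#   _CheckTargetNodeIPolicy(self, ipolicy, instance, nodeinfo,
+#                           ignore=self.op.ignore_ipolicy)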
+
+
def _ExpandItemName(fn, name, kind):
"""Expand an item name.
return mc_now < mc_should
+def _CalculateGroupIPolicy(cluster, group):
+ """Calculate instance policy for group.
+
+ """
+ return cluster.SimpleFillIPolicy(group.ipolicy)
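+
+# SimpleFillIPolicy layers the group's partial ipolicy over the
+# cluster-wide defaults, so callers always see a fully populated policy.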
+
+
def _CheckNicsBridgesExist(lu, target_nics, target_node):
"""Check that the brigdes needed by a list of nics exist.
ems = self.cfg.GetUseExternalMipScript()
result = self.rpc.call_node_deactivate_master_ip(master_params.name,
master_params, ems)
- result.Raise("Could not disable the master role")
+ if result.fail_msg:
+ self.LogWarning("Error disabling the master IP address: %s",
+ result.fail_msg)
return master_params.name
node_vol_should = {}
instanceconfig.MapLVsByNode(node_vol_should)
+ ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
+ err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
+ _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, err)
+
for node in node_vol_should:
n_img = node_image[node]
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
for name, values in svalues.items()))
for storage, svalues in new_disk_state.items())
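+ # For each ipolicy key named in the opcode, merge the new values over
+ # the existing cluster-level ones via _GetUpdatedParams.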
+ if self.op.ipolicy:
+ ipolicy = {}
+ for key, value in self.op.ipolicy.items():
+ utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
+ ipolicy[key] = _GetUpdatedParams(cluster.ipolicy.get(key, {}),
+ value)
+ objects.InstancePolicy.CheckParameterSyntax(ipolicy)
+ self.new_ipolicy = ipolicy
+
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
if self.op.nicparams:
self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
+ if self.op.ipolicy:
+ self.cluster.ipolicy = self.new_ipolicy
if self.op.osparams:
self.cluster.osparams = self.new_osp
if self.op.ndparams:
if self.op.ndparams:
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+ if self.op.hv_state:
+ self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+
+ if self.op.disk_state:
+ self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
else:
new_node.ndparams = {}
+ if self.op.hv_state:
+ new_node.hv_state_static = self.new_hv_state
+
+ if self.op.disk_state:
+ new_node.disk_state_static = self.new_disk_state
+
# check connectivity
result = self.rpc.call_version([node])[node]
result.Raise("Can't get version information from node %s" % node)
"os_hvp": os_hvp,
"beparams": cluster.beparams,
"osparams": cluster.osparams,
+ "ipolicy": cluster.ipolicy,
"nicparams": cluster.nicparams,
"ndparams": cluster.ndparams,
"candidate_pool_size": cluster.candidate_pool_size,
_CheckNodeOnline(self, instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(instance)
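+ # beparams passed in the opcode override the cluster-filled values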
+ bep.update(self.op.beparams)
# check bridges existence
_CheckInstanceBridgesExist(self, instance)
cleanup=False,
failover=True,
ignore_consistency=ignore_consistency,
- shutdown_timeout=shutdown_timeout)
+ shutdown_timeout=shutdown_timeout,
+ ignore_ipolicy=self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
self._migrater = TLMigrateInstance(self, self.op.instance_name,
cleanup=self.op.cleanup,
failover=False,
- fallback=self.op.allow_failover)
+ fallback=self.op.allow_failover,
+ ignore_ipolicy=self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
_CheckNodeVmCapable(self, target_node)
+ ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
+ self.cfg.GetNodeGroup(node.group))
+ _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
+ ignore=self.op.ignore_ipolicy)
if instance.admin_state == constants.ADMINST_UP:
# check memory requirements on the secondary node
mode=self.op.mode,
live=self.op.live,
iallocator=self.op.iallocator,
- target_node=self.op.target_node)]
+ target_node=self.op.target_node,
+ ignore_ipolicy=self.op.ignore_ipolicy)]
for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
]
and target node
@type shutdown_timeout: int
@ivar shutdown_timeout: In case of failover timeout of the shutdown
+ @type ignore_ipolicy: bool
+ @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
+
"""
def __init__(self, lu, instance_name, cleanup=False,
failover=False, fallback=False,
ignore_consistency=False,
- shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
+ shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
+ ignore_ipolicy=False):
"""Initializes this class.
"""
self.fallback = fallback
self.ignore_consistency = ignore_consistency
self.shutdown_timeout = shutdown_timeout
+ self.ignore_ipolicy = ignore_ipolicy
def CheckPrereq(self):
"""Check prerequisites.
instance = self.cfg.GetInstanceInfo(instance_name)
assert instance is not None
self.instance = instance
+ cluster = self.cfg.GetClusterInfo()
if (not self.cleanup and
not instance.admin_state == constants.ADMINST_UP and
# BuildHooksEnv
self.target_node = self.lu.op.target_node
+ # Check that the target node is correct in terms of instance policy
+ nodeinfo = self.cfg.GetNodeInfo(self.target_node)
+ group_info = self.cfg.GetNodeGroup(nodeinfo.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, group_info)
+ _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+ ignore=self.ignore_ipolicy)
+
# self.target_node is already populated, either directly or by the
# iallocator run
target_node = self.target_node
" node can be passed)" %
(instance.disk_template, text),
errors.ECODE_INVAL)
+ nodeinfo = self.cfg.GetNodeInfo(target_node)
+ group_info = self.cfg.GetNodeGroup(nodeinfo.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, group_info)
+ _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+ ignore=self.ignore_ipolicy)
- i_be = self.cfg.GetClusterInfo().FillBE(instance)
+ i_be = cluster.FillBE(instance)
# check memory requirements on the secondary node
if not self.failover or instance.admin_state == constants.ADMINST_UP:
self.lu.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
+ # check if failover must be forced instead of migration
+ if (not self.cleanup and not self.failover and
+ i_be[constants.BE_ALWAYS_FAILOVER]):
+ if self.fallback:
+ self.lu.LogInfo("Instance configured to always failover; fallback"
+ " to failover")
+ self.failover = True
+ else:
+ raise errors.OpPrereqError("This instance has been configured to"
+ " always failover, please allow failover",
+ errors.ECODE_STATE)
+
# check bridge existence
_CheckInstanceBridgesExist(self.lu, instance, node=target_node)
self.lu.op.live = None
elif self.lu.op.mode is None:
# read the default value from the hypervisor
- i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
- skip_globals=False)
+ i_hv = cluster.FillHV(self.instance, skip_globals=False)
self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
"""Run the allocator based on input opcode.
"""
+ # FIXME: add a self.ignore_ipolicy option
ial = IAllocator(self.cfg, self.rpc,
mode=constants.IALLOCATOR_MODE_RELOC,
name=self.instance_name,
dt_params = disk_params[disk_template]
if disk_template == constants.DT_DRBD8:
drbd_params = {
- constants.RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
- constants.BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
- constants.NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
+ constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
+ constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
+ constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
+ constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
+ constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
+ constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
+ constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
+ constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
+ constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
+ constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
+ constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
+ constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
}
drbd_params = \
# data LV
data_params = {
- constants.STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
+ constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
}
data_params = \
objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
# metadata LV
meta_params = {
- constants.STRIPES: dt_params[constants.DRBD_META_STRIPES],
+ constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
}
meta_params = \
objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
elif disk_template == constants.DT_PLAIN:
params = {
- constants.STRIPES: dt_params[constants.LV_STRIPES],
+ constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
}
params = \
objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
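+ # the metadata VG defaults to the group-level DRBD default-metavg disk
+ # parameter; an explicit IDISK_METAVG per disk still takes precedence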
+ drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
data_vg = disk.get(constants.IDISK_VG, vgname)
- meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
+ meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
disk[constants.IDISK_SIZE],
[data_vg, meta_vg],
# pylint: disable=W0142
self.instance_file_storage_dir = utils.PathJoin(*joinargs)
- def CheckPrereq(self):
+ def CheckPrereq(self): # pylint: disable=R0914
"""Check prerequisites.
"""
constants.IDISK_SIZE: size,
constants.IDISK_MODE: mode,
constants.IDISK_VG: data_vg,
- constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
}
+ if constants.IDISK_METAVG in disk:
+ new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
if constants.IDISK_ADOPT in disk:
new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
self.disks.append(new_disk)
_ReleaseLocks(self, locking.LEVEL_NODE,
keep=filter(None, [self.op.pnode, self.op.snode,
self.op.src_node]))
+ _ReleaseLocks(self, locking.LEVEL_NODE_RES,
+ keep=filter(None, [self.op.pnode, self.op.snode,
+ self.op.src_node]))
#### node related checks
nodenames = [pnode.name] + self.secondaries
+ # Verify instance specs
+ ispec = {
+ constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
+ constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
+ constants.ISPEC_DISK_COUNT: len(self.disks),
+ constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
+ constants.ISPEC_NIC_COUNT: len(self.nics),
+ }
+
+ group_info = self.cfg.GetNodeGroup(pnode.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, group_info)
+ res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
+ if not self.op.ignore_ipolicy and res:
+ raise errors.OpPrereqError(("Instance allocation to group %s violates"
+ " policy: %s") % (pnode.group,
+ utils.CommaJoin(res)),
+ errors.ECODE_INVAL)
+
# disk parameters (not customizable at instance or node level)
# just use the primary node parameters, ignoring the secondary.
- self.diskparams = self.cfg.GetNodeGroup(pnode.group).diskparams
+ self.diskparams = group_info.diskparams
if not self.adopt_disks:
# Check lv size requirements, if not adopting
_CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
+ snode_group = self.cfg.GetNodeGroup(snode_info.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
+ _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+ ignore=self.op.ignore_ipolicy)
if pnode_info.group != snode_info.group:
self.LogWarning("The primary and secondary nodes are in two"
" different node groups; the disk parameters"
if self.op.ndparams:
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+ if self.op.hv_state:
+ self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+ else:
+ self.new_hv_state = None
+
+ if self.op.disk_state:
+ self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+ else:
+ self.new_disk_state = None
+
if self.op.diskparams:
for templ in constants.DISK_TEMPLATES:
if templ not in self.op.diskparams:
else:
self.op.diskparams = self.cfg.GetClusterInfo().diskparams
+ if self.op.ipolicy:
+ cluster = self.cfg.GetClusterInfo()
+ full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
+ objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
+
def BuildHooksEnv(self):
"""Build hooks env.
uuid=self.group_uuid,
alloc_policy=self.op.alloc_policy,
ndparams=self.op.ndparams,
- diskparams=self.op.diskparams)
+ diskparams=self.op.diskparams,
+ ipolicy=self.op.ipolicy,
+ hv_state_static=self.new_hv_state,
+ disk_state_static=self.new_disk_state)
self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
del self.remove_locks[locking.LEVEL_NODEGROUP]
lu.needed_locks = {}
self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
+ self._cluster = lu.cfg.GetClusterInfo()
name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
if not self.names:
# Do not pass on node information if it was not requested.
group_to_nodes = None
- return query.GroupQueryData([self._all_groups[uuid]
+ return query.GroupQueryData(self._cluster,
+ [self._all_groups[uuid]
for uuid in self.wanted],
group_to_nodes, group_to_instances)
self.op.diskparams,
self.op.alloc_policy,
self.op.hv_state,
- self.op.disk_state
+ self.op.disk_state,
+ self.op.ipolicy,
]
if all_changes.count(None) == len(all_changes):
_MergeAndVerifyDiskState(self.op.disk_state,
self.group.disk_state_static)
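+ # Same merge as at cluster level, but with use_none=True: setting a
+ # value to None removes the group-level override so the cluster-wide
+ # default applies again.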
+ if self.op.ipolicy:
+ g_ipolicy = {}
+ for key, value in self.op.ipolicy.iteritems():
+ g_ipolicy[key] = _GetUpdatedParams(self.group.ipolicy.get(key, {}),
+ value,
+ use_none=True)
+ utils.ForceDictType(g_ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
+ self.new_ipolicy = g_ipolicy
+ objects.InstancePolicy.CheckParameterSyntax(self.new_ipolicy)
+
def BuildHooksEnv(self):
"""Build hooks env.
if self.op.disk_state:
self.group.disk_state_static = self.new_disk_state
+ if self.op.ipolicy:
+ self.group.ipolicy = self.new_ipolicy
+
self.cfg.Update(self.group, feedback_fn)
return result