#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
return params_copy
+def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
+ """Return the new version of a instance policy.
+
+ @param group_policy: whether this policy applies to a group and thus
+ we should support removal of policy entries
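+ @param old_ipolicy: the old (current) instance policy; left unmodified
+ @param new_ipolicy: the partial policy update to merge into the old one
+ @return: the updated instance policy, after syntax validation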
+
+ """
+ use_none = use_default = group_policy
+ ipolicy = copy.deepcopy(old_ipolicy)
+ for key, value in new_ipolicy.items():
+ if key not in constants.IPOLICY_ALL_KEYS:
+ raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
+ errors.ECODE_INVAL)
+ if key in constants.IPOLICY_PARAMETERS:
+ utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
+ ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
+ use_none=use_none,
+ use_default=use_default)
+ else:
+ # FIXME: we assume all others are lists; this should be redone
+ # in a nicer way
+ if not value or value == [constants.VALUE_DEFAULT]:
+ if group_policy:
+ del ipolicy[key]
+ else:
+ raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
+ " on the cluster'" % key,
+ errors.ECODE_INVAL)
+ else:
+ ipolicy[key] = list(value)
+ try:
+ objects.InstancePolicy.CheckParameterSyntax(ipolicy)
+ except errors.ConfigurationError, err:
+ raise errors.OpPrereqError("Invalid instance policy: %s" % err,
+ errors.ECODE_INVAL)
+ return ipolicy
+
+
def _UpdateAndVerifySubDict(base, updates, type_check):
"""Updates and verifies a dict with sub dicts of the same type.
if msg is None:
msg = "can't use instance from outside %s states" % ", ".join(req_states)
if instance.admin_state not in req_states:
- raise errors.OpPrereqError("Instance %s is marked to be %s, %s" %
- (instance, instance.admin_state, msg),
+ raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
+ (instance.name, instance.admin_state, msg),
errors.ECODE_STATE)
if constants.ADMINST_UP not in req_states:
(instance.name, msg), errors.ECODE_STATE)
-def _CheckMinMaxSpecs(name, ipolicy, value):
- """Checks if value is in the desired range.
+def _ComputeMinMaxSpec(name, ipolicy, value):
+ """Computes if value is in the desired range.
@param name: name of the parameter for which we perform the check
@param ipolicy: dictionary containing min, max and std values
def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
nic_count, disk_sizes,
- _check_spec_fn=_CheckMinMaxSpecs):
+ _compute_fn=_ComputeMinMaxSpec):
"""Verifies ipolicy against provided specs.
@type ipolicy: dict
@param nic_count: Number of nics used
@type disk_sizes: list of ints
@param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
- @param _check_spec_fn: The checking function (unittest only)
+ @param _compute_fn: The compute function (unittest only)
@return: A list of violations, or an empty list if no violations are found
"""
] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes)
return filter(None,
- (_check_spec_fn(name, ipolicy, value)
+ (_compute_fn(name, ipolicy, value)
for (name, value) in test_settings))
@see: L{_ComputeIPolicySpecViolation}
"""
- res = _compute_fn(ipolicy, instance, instance.primary_node.group, node.group)
+ primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
+ res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
if res:
msg = ("Instance does not meet target node group's (%s) instance"
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
+ """Computes a set of any instances that would violate the new ipolicy.
+
+ @param old_ipolicy: The current (still in-place) ipolicy
+ @param new_ipolicy: The new (to become) ipolicy
+ @param instances: List of instances to verify
+ @return: A frozenset of instance names that violate the new ipolicy but did not violate the old one
+
+ """
+ return (_ComputeViolatingInstances(new_ipolicy, instances) -
+ _ComputeViolatingInstances(old_ipolicy, instances))
+
+
def _ExpandItemName(fn, name, kind):
"""Expand an item name.
return cluster.SimpleFillIPolicy(group.ipolicy)
+def _ComputeViolatingInstances(ipolicy, instances):
+ """Computes a set of instances who violates given ipolicy.
+
+ @param ipolicy: The ipolicy to verify
+ @type instances: list of objects.Instance
+ @param instances: List of instances to verify
+ @return: A frozenset of instance names violating the ipolicy
+
+ """
+ return frozenset([inst.name for inst in instances
+ if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
+
+
def _CheckNicsBridgesExist(lu, target_nics, target_node):
"""Check that the brigdes needed by a list of nics exist.
msg = "cannot reach the master IP"
_ErrorIf(True, constants.CV_ENODENET, node, msg)
- def _VerifyInstancePolicy(self, instance):
- """Verify instance specs against instance policy set on node group level.
-
-
- """
- cluster = self.cfg.GetClusterInfo()
- full_beparams = cluster.FillBE(instance)
- ipolicy = cluster.SimpleFillIPolicy(self.group_info.ipolicy)
-
- mem_size = full_beparams.get(constants.BE_MAXMEM, None)
- cpu_count = full_beparams.get(constants.BE_VCPUS, None)
- disk_count = len(instance.disks)
- disk_sizes = [disk.size for disk in instance.disks]
- nic_count = len(instance.nics)
-
- test_settings = [
- (constants.ISPEC_MEM_SIZE, mem_size),
- (constants.ISPEC_CPU_COUNT, cpu_count),
- (constants.ISPEC_DISK_COUNT, disk_count),
- (constants.ISPEC_NIC_COUNT, nic_count),
- ] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes)
-
- for (name, value) in test_settings:
- test_result = _CheckMinMaxSpecs(name, ipolicy, value)
- self._ErrorIf(test_result is not None,
- constants.CV_EINSTANCEPOLICY, instance.name,
- test_result)
-
def _VerifyInstance(self, instance, instanceconfig, node_image,
diskstatus):
"""Verify an instance.
# all nodes to be modified.
self.needed_locks = {
locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_INSTANCE: locking.ALL_SET,
+ locking.LEVEL_NODEGROUP: locking.ALL_SET,
+ }
+ self.share_locks = {
+ locking.LEVEL_NODE: 1,
+ locking.LEVEL_INSTANCE: 1,
+ locking.LEVEL_NODEGROUP: 1,
}
- self.share_locks[locking.LEVEL_NODE] = 1
def BuildHooksEnv(self):
"""Build hooks env.
for storage, svalues in new_disk_state.items())
if self.op.ipolicy:
- ipolicy = {}
- for key, value in self.op.ipolicy.items():
- utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
- ipolicy[key] = _GetUpdatedParams(cluster.ipolicy.get(key, {}),
- value)
- objects.InstancePolicy.CheckParameterSyntax(ipolicy)
- self.new_ipolicy = ipolicy
+ self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+ group_policy=False)
+
+ all_instances = self.cfg.GetAllInstancesInfo().values()
+ violations = set()
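+ # The effective policy of a group is the new cluster policy filled with
+ # the group's own overrides, so check each group's instances against its
+ # filled policy.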
+ for group in self.cfg.GetAllNodeGroupsInfo().values():
+ instances = frozenset([inst for inst in all_instances
+ if compat.any(node in group.members
+ for node in inst.all_nodes)])
+ new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
+ new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
+ group),
+ new_ipolicy, instances)
+ if new:
+ violations.update(new)
+
+ if violations:
+ self.LogWarning("After the ipolicy change the following instances"
+ " violate them: %s",
+ utils.CommaJoin(violations))
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
def ExpandNames(self):
self._ExpandAndLockInstance()
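+ # Node resource locks on the primary node are needed so that the free
+ # memory check in CheckPrereq is done under the proper lock.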
+ self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE_RES:
+ self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
def BuildHooksEnv(self):
"""Build hooks env.
if not remote_info.payload: # not running already
_CheckNodeFreeMemory(self, instance.primary_node,
"starting instance %s" % instance.name,
- bep[constants.BE_MAXMEM], instance.hypervisor)
+ bep[constants.BE_MINMEM], instance.hypervisor)
def Exec(self, feedback_fn):
"""Start the instance.
self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
else:
self.needed_locks[locking.LEVEL_NODE] = []
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
_CheckNodeVmCapable(self, target_node)
- ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), node.group)
+ ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
+ self.cfg.GetNodeGroup(node.group))
_CheckTargetNodeIPolicy(self, ipolicy, instance, node,
ignore=self.op.ignore_ipolicy)
# Check that the target node is correct in terms of instance policy
nodeinfo = self.cfg.GetNodeInfo(self.target_node)
- ipolicy = _CalculateGroupIPolicy(cluster, nodeinfo.group)
+ group_info = self.cfg.GetNodeGroup(nodeinfo.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, group_info)
_CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
ignore=self.ignore_ipolicy)
(instance.disk_template, text),
errors.ECODE_INVAL)
nodeinfo = self.cfg.GetNodeInfo(target_node)
- ipolicy = _CalculateGroupIPolicy(cluster, nodeinfo.group)
+ group_info = self.cfg.GetNodeGroup(nodeinfo.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, group_info)
_CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
ignore=self.ignore_ipolicy)
i_be = cluster.FillBE(instance)
# check memory requirements on the secondary node
- if not self.failover or instance.admin_state == constants.ADMINST_UP:
+ if (not self.cleanup and
+ (not self.failover or instance.admin_state == constants.ADMINST_UP)):
_CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
instance.name, i_be[constants.BE_MAXMEM],
instance.hypervisor)
self._GoReconnect(False)
self._WaitUntilSync()
+ # If the instance's disk template is `rbd' and there was a successful
+ # migration, unmap the device from the source node.
+ if self.instance.disk_template == constants.DT_RBD:
+ disks = _ExpandCheckDisks(instance, instance.disks)
+ self.feedback_fn("* unmapping instance's disks from %s" % source_node)
+ for disk in disks:
+ result = self.rpc.call_blockdev_shutdown(source_node, disk)
+ msg = result.fail_msg
+ if msg:
+ logging.error("Migration was successful, but couldn't unmap the"
+ " block device %s on source node %s: %s",
+ disk.iv_name, source_node, msg)
+ logging.error("You need to unmap the device %s manually on %s",
+ disk.iv_name, source_node)
+
self.feedback_fn("* done")
def _ExecFailover(self):
elif disk_template == constants.DT_BLOCK:
result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
+ elif disk_template == constants.DT_RBD:
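+ # Translate the disk-template-level RBD pool setting into the
+ # corresponding logical-disk parameter, on top of the LD_RBD defaults.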
+ params = {
+ constants.LDP_POOL: dt_params[constants.RBD_POOL]
+ }
+ params = \
+ objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD],
+ params)
+ result.append(params)
+
return result
if template_name == constants.DT_DISKLESS:
pass
elif template_name == constants.DT_PLAIN:
- if len(secondary_nodes) != 0:
+ if secondary_nodes:
raise errors.ProgrammerError("Wrong template configuration")
names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
disk_dev.mode = disk[constants.IDISK_MODE]
disks.append(disk_dev)
elif template_name == constants.DT_FILE:
- if len(secondary_nodes) != 0:
+ if secondary_nodes:
raise errors.ProgrammerError("Wrong template configuration")
opcodes.RequireFileStorage()
params=ld_params[0])
disks.append(disk_dev)
elif template_name == constants.DT_SHARED_FILE:
- if len(secondary_nodes) != 0:
+ if secondary_nodes:
raise errors.ProgrammerError("Wrong template configuration")
opcodes.RequireSharedFileStorage()
params=ld_params[0])
disks.append(disk_dev)
elif template_name == constants.DT_BLOCK:
- if len(secondary_nodes) != 0:
+ if secondary_nodes:
raise errors.ProgrammerError("Wrong template configuration")
for idx, disk in enumerate(disk_info):
mode=disk[constants.IDISK_MODE],
params=ld_params[0])
disks.append(disk_dev)
+ elif template_name == constants.DT_RBD:
+ if secondary_nodes:
+ raise errors.ProgrammerError("Wrong template configuration")
+
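+ # RBD disks have no secondary node; replication is provided by the RADOS
+ # cluster itself, so each volume only needs a cluster-wide unique name.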
+ names = _GenerateUniqueNames(lu, [".rbd.disk%d" % (base_index + i)
+ for i in range(disk_count)])
+
+ for idx, disk in enumerate(disk_info):
+ disk_index = idx + base_index
+ disk_dev = objects.Disk(dev_type=constants.LD_RBD,
+ size=disk[constants.IDISK_SIZE],
+ logical_id=("rbd", names[idx]),
+ iv_name="disk/%d" % disk_index,
+ mode=disk[constants.IDISK_MODE],
+ params=ld_params[0])
+ disks.append(disk_dev)
else:
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
constants.DT_FILE: None,
constants.DT_SHARED_FILE: 0,
constants.DT_BLOCK: 0,
+ constants.DT_RBD: 0,
}
if disk_template not in req_size_dict:
_ReleaseLocks(self, locking.LEVEL_NODE,
keep=filter(None, [self.op.pnode, self.op.snode,
self.op.src_node]))
+ _ReleaseLocks(self, locking.LEVEL_NODE_RES,
+ keep=filter(None, [self.op.pnode, self.op.snode,
+ self.op.src_node]))
#### node related checks
constants.ISPEC_NIC_COUNT: len(self.nics),
}
- ipolicy = _CalculateGroupIPolicy(cluster, pnode.group)
+ group_info = self.cfg.GetNodeGroup(pnode.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, group_info)
res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
if not self.op.ignore_ipolicy and res:
raise errors.OpPrereqError(("Instance allocation to group %s violates"
# disk parameters (not customizable at instance or node level)
# just use the primary node parameters, ignoring the secondary.
- self.diskparams = self.cfg.GetNodeGroup(pnode.group).diskparams
+ self.diskparams = group_info.diskparams
if not self.adopt_disks:
- # Check lv size requirements, if not adopting
- req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
- _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
+ if self.op.disk_template == constants.DT_RBD:
+ # Check if there is enough space on the RADOS cluster.
+ # _CheckRADOSFreeSpace() is currently just a placeholder; any
+ # prerequisite check can be placed here.
+ _CheckRADOSFreeSpace()
+ else:
+ # Check lv size requirements, if not adopting
+ req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
+ _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
return list(iobj.all_nodes)
+def _CheckRADOSFreeSpace():
+ """Compute disk size requirements inside the RADOS cluster.
+
+ """
+ # For the RADOS cluster we assume there is always enough space.
+ pass
+
+
class LUInstanceConsole(NoHooksLU):
"""Connect to an instance's console.
self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node,
- self.op.disks, False, self.op.early_release)
+ self.op.disks, False, self.op.early_release,
+ self.op.ignore_ipolicy)
self.tasklets = [self.replacer]
"""
def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
- disks, delay_iallocator, early_release):
+ disks, delay_iallocator, early_release, ignore_ipolicy):
"""Initializes this class.
"""
self.disks = disks
self.delay_iallocator = delay_iallocator
self.early_release = early_release
+ self.ignore_ipolicy = ignore_ipolicy
# Runtime data
self.instance = None
if not self.disks:
self.disks = range(len(self.instance.disks))
+ # TODO: This is ugly, but right now we can't distinguish between
+ # internally submitted opcodes and external ones. We should fix that.
+ if self.remote_node_info:
+ # We change the node; let's verify it still meets the instance policy
+ new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
+ ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
+ new_group_info)
+ _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
+ ignore=self.ignore_ipolicy)
+
# TODO: compute disk parameters
primary_node_info = self.cfg.GetNodeInfo(instance.primary_node)
secondary_node_info = self.cfg.GetNodeInfo(secondary_node)
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.needed_locks[locking.LEVEL_NODE_RES] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
self.disk = instance.FindDisk(self.op.disk)
if instance.disk_template not in (constants.DT_FILE,
- constants.DT_SHARED_FILE):
+ constants.DT_SHARED_FILE,
+ constants.DT_RBD):
# TODO: check the free disk space for file, when that feature will be
# supported
_CheckNodesFreeDiskPerVG(self, nodenames,
def CheckArguments(self):
if not (self.op.nics or self.op.disks or self.op.disk_template or
self.op.hvparams or self.op.beparams or self.op.os_name or
- self.op.online_inst or self.op.offline_inst):
+ self.op.online_inst or self.op.offline_inst or
+ self.op.runtime_mem):
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
if self.op.disk_template:
env["NEW_DISK_TEMPLATE"] = self.op.disk_template
+ if self.op.runtime_mem:
+ env["RUNTIME_MEMORY"] = self.op.runtime_mem
return env
_CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
- ipolicy = _CalculateGroupIPolicy(cluster, snode_info.group)
+ snode_group = self.cfg.GetNodeGroup(snode_info.group)
+ ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
_CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
ignore=self.op.ignore_ipolicy)
if pnode_info.group != snode_info.group:
" %s, due to not enough memory" % node,
errors.ECODE_STATE)
+ if self.op.runtime_mem:
+ remote_info = self.rpc.call_instance_info(instance.primary_node,
+ instance.name,
+ instance.hypervisor)
+ remote_info.Raise("Error checking node %s" % instance.primary_node)
+ if not remote_info.payload: # not running already
+ raise errors.OpPrereqError("Instance %s is not running" % instance.name,
+ errors.ECODE_STATE)
+
+ current_memory = remote_info.payload["memory"]
+ if (not self.op.force and
+ (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
+ self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
+ raise errors.OpPrereqError("Instance %s must have memory between %d"
+ " and %d MB of memory unless --force is"
+ " given" % (instance.name,
+ self.be_proposed[constants.BE_MINMEM],
+ self.be_proposed[constants.BE_MAXMEM]),
+ errors.ECODE_INVAL)
+
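+ # Only growing the runtime memory needs free memory on the node;
+ # shrinking is always possible as far as the node is concerned.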
+ if self.op.runtime_mem > current_memory:
+ _CheckNodeFreeMemory(self, instance.primary_node,
+ "ballooning memory for instance %s" %
+ instance.name,
+ self.op.runtime_mem - current_memory,
+ instance.hypervisor)
+
# NIC processing
self.nic_pnew = {}
self.nic_pinst = {}
result = []
instance = self.instance
+
+ # runtime memory
+ if self.op.runtime_mem:
+ rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
+ instance,
+ self.op.runtime_mem)
+ rpcres.Raise("Cannot modify instance runtime memory")
+ result.append(("runtime_memory", self.op.runtime_mem))
+
# disk changes
for disk_op, disk_dict in self.op.disks:
if disk_op == constants.DDM_REMOVE:
if self.op.ipolicy:
cluster = self.cfg.GetClusterInfo()
full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
- objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
+ try:
+ objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
+ except errors.ConfigurationError, err:
+ raise errors.OpPrereqError("Invalid instance policy: %s" % err,
+ errors.ECODE_INVAL)
def BuildHooksEnv(self):
"""Build hooks env.
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
self.needed_locks = {
+ locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [self.group_uuid],
}
+ self.share_locks[locking.LEVEL_INSTANCE] = 1
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_INSTANCE:
+ assert not self.needed_locks[locking.LEVEL_INSTANCE]
+
+ # Lock instances optimistically, needs verification once group lock has
+ # been acquired
+ self.needed_locks[locking.LEVEL_INSTANCE] = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid)
+
def CheckPrereq(self):
"""Check prerequisites.
"""
+ owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+
+ # Check if locked instances are still correct
+ _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
self.group = self.cfg.GetNodeGroup(self.group_uuid)
+ cluster = self.cfg.GetClusterInfo()
if self.group is None:
raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
self.group.disk_state_static)
if self.op.ipolicy:
- g_ipolicy = {}
- for key, value in self.op.ipolicy.iteritems():
- g_ipolicy[key] = _GetUpdatedParams(self.group.ipolicy.get(key, {}),
- value,
- use_none=True)
- utils.ForceDictType(g_ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
- self.new_ipolicy = g_ipolicy
- objects.InstancePolicy.CheckParameterSyntax(self.new_ipolicy)
+ self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
+ self.op.ipolicy,
+ group_policy=True)
+
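+ # Warn about instances of this group that would violate the new policy
+ # (filled with the cluster-wide defaults).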
+ new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
+ inst_filter = lambda inst: inst.name in owned_instances
+ instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
+ violations = \
+ _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
+ self.group),
+ new_ipolicy, instances)
+
+ if violations:
+ self.LogWarning("After the ipolicy change the following instances"
+ " violate them: %s",
+ utils.CommaJoin(violations))
def BuildHooksEnv(self):
"""Build hooks env.
"""Compute node groups data.
"""
+ cluster = cfg.GetClusterInfo()
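+ # Each group's entry carries its effective instance policy, i.e. the
+ # cluster policy filled with the group's overrides.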
ng = dict((guuid, {
"name": gdata.name,
"alloc_policy": gdata.alloc_policy,
+ "ipolicy": _CalculateGroupIPolicy(cluster, gdata),
})
for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())