self.op = op
self.cfg = context.cfg
self.glm = context.glm
+ # readability alias
+ self.owned_locks = context.glm.list_owned
self.context = context
self.rpc = rpc
# Dicts used to declare locking needs to mcpu
# future we might want to have different behaviors depending on the value
# of self.recalculate_locks[locking.LEVEL_NODE]
wanted_nodes = []
- for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
- instance = self.context.cfg.GetInstanceInfo(instance_name)
+ locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
+ for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
wanted_nodes.append(instance.primary_node)
if not primary_only:
wanted_nodes.extend(instance.secondary_nodes)
"""
if self.do_locking:
- names = lu.glm.list_owned(lock_level)
+ names = lu.owned_locks(lock_level)
else:
names = all_names
return dict.fromkeys(locking.LEVELS, 1)
+def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
+ """Checks if the owned node groups are still correct for an instance.
+
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: The cluster configuration
+ @type instance_name: string
+ @param instance_name: Instance name
+ @type owned_groups: set or frozenset
+ @param owned_groups: Currently owned node groups
+
+ """
+ inst_groups = cfg.GetInstanceNodeGroups(instance_name)
+
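+ # Node group locks are acquired optimistically (going via the nodes
+ # before they are locked), so the groups may have changed; verify the
+ # owned locks still cover every group used by the instance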
+ if not owned_groups.issuperset(inst_groups):
+ raise errors.OpPrereqError("Instance %s's node groups changed since"
+ " locks were acquired, current groups are"
+ " are '%s', owning groups '%s'; retry the"
+ " operation" %
+ (instance_name,
+ utils.CommaJoin(inst_groups),
+ utils.CommaJoin(owned_groups)),
+ errors.ECODE_STATE)
+
+ return inst_groups
+
+
def _SupportsOob(cfg, node):
"""Tells if node supports OOB.
release = []
# Determine which locks to release
- for name in lu.glm.list_owned(level):
+ for name in lu.owned_locks(level):
if should_release(name):
release.append(name)
else:
retain.append(name)
- assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))
+ assert len(lu.owned_locks(level)) == (len(retain) + len(release))
# Release just some locks
lu.glm.release(level, names=release)
- assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
+ assert frozenset(lu.owned_locks(level)) == frozenset(retain)
else:
# Release everything
lu.glm.release(level)
" iallocator")
+def _GetDefaultIAllocator(cfg, iallocator):
+ """Decides on which iallocator to use.
+
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: Cluster configuration object
+ @type iallocator: string or None
+ @param iallocator: Iallocator specified in opcode
+ @rtype: string
+ @return: Iallocator name
+
+ """
+ if not iallocator:
+ # Use default iallocator
+ iallocator = cfg.GetDefaultIAllocator()
+
+ if not iallocator:
+ raise errors.OpPrereqError("No iallocator was specified, neither in the"
+ " opcode nor as a cluster-wide default",
+ errors.ECODE_INVAL)
+
+ return iallocator
+
+
class LUClusterPostInit(LogicalUnit):
"""Logical unit for running hooks after cluster initialization.
# volumes for these instances are healthy, we will need to do an
# extra call to their secondaries. We ensure here those nodes will
# be locked.
- for inst in self.glm.list_owned(locking.LEVEL_INSTANCE):
+ for inst in self.owned_locks(locking.LEVEL_INSTANCE):
# Important: access only the instances whose lock is owned
if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
nodes.update(all_inst_info[inst].secondary_nodes)
group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
unlocked_nodes = \
- group_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+ group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
unlocked_instances = \
- group_instances.difference(self.glm.list_owned(locking.LEVEL_INSTANCE))
+ group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
if unlocked_nodes:
raise errors.OpPrereqError("Missing lock for nodes: %s" %
extra_lv_nodes.add(nname)
unlocked_lv_nodes = \
- extra_lv_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+ extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
if unlocked_lv_nodes:
raise errors.OpPrereqError("these nodes could be locked: %s" %
}
def Exec(self, feedback_fn):
- group_names = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
# Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
# going via the node before it's locked, requiring verification
# later on
[group_uuid
- for instance_name in
- self.glm.list_owned(locking.LEVEL_INSTANCE)
- for group_uuid in
- self.cfg.GetInstanceNodeGroups(instance_name)])
+ for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+ for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
elif level == locking.LEVEL_NODE:
# This will only lock the nodes in the group to be verified which contain
self._LockInstancesNodes()
# Lock all nodes in group to be verified
- assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
def CheckPrereq(self):
- owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
- owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+ owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
assert self.group_uuid in owned_groups
errors.ECODE_STATE)
# Get instance information
- self.instances = dict((name, self.cfg.GetInstanceInfo(name))
- for name in owned_instances)
+ self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
# Check if node groups for locked instances are still correct
for (instance_name, inst) in self.instances.items():
assert owned_nodes.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % instance_name
- inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
- if not owned_groups.issuperset(inst_groups):
- raise errors.OpPrereqError("Instance %s's node groups changed since"
- " locks were acquired, current groups are"
- " are '%s', owning groups '%s'; retry the"
- " operation" %
- (instance_name,
- utils.CommaJoin(inst_groups),
- utils.CommaJoin(owned_groups)),
- errors.ECODE_STATE)
+ _CheckInstanceNodeGroups(self.cfg, instance_name, owned_groups)
def Exec(self, feedback_fn):
"""Verify integrity of cluster disks.
if inst.admin_up])
if nv_dict:
- nodes = utils.NiceSort(set(self.glm.list_owned(locking.LEVEL_NODE)) &
+ nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
set(self.cfg.GetVmCapableNodeList()))
node_lvs = self.rpc.call_lv_list(nodes, [])
"""
if self.wanted_names is None:
- self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+ self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
- self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
- in self.wanted_names]
+ self.wanted_instances = \
+ map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
def _EnsureChildSizes(self, disk):
"""Ensure children of the disk have the needed disk size.
" drbd-based instances exist",
errors.ECODE_INVAL)
- node_list = self.glm.list_owned(locking.LEVEL_NODE)
+ node_list = self.owned_locks(locking.LEVEL_NODE)
# if vg_name not None, checks given volume group on all nodes
if self.op.vg_name:
if self.op.drbd_helper:
# checks given drbd helper on all nodes
helpers = self.rpc.call_drbd_helper(node_list)
- for node in node_list:
- ninfo = self.cfg.GetNodeInfo(node)
+ for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
if ninfo.offline:
self.LogInfo("Not checking drbd helper on offline node %s", node)
continue
if self.op.command in self._SKIP_MASTER:
assert self.master_node not in self.op.node_names
- for node_name in self.op.node_names:
- node = self.cfg.GetNodeInfo(node_name)
-
+ for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
if node is None:
raise errors.OpPrereqError("Node %s not found" % node_name,
errors.ECODE_NOENT)
node = self.cfg.GetNodeInfo(self.op.node_name)
assert node is not None
- instance_list = self.cfg.GetInstanceList()
-
masternode = self.cfg.GetMasterNode()
if node.name == masternode:
raise errors.OpPrereqError("Node is the master node, failover to another"
" node is required", errors.ECODE_INVAL)
- for instance_name in instance_list:
- instance = self.cfg.GetInstanceInfo(instance_name)
+ for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
if node.name in instance.all_nodes:
raise errors.OpPrereqError("Instance %s is still running on the node,"
" please remove first" % instance_name,
"""Computes the list of nodes and their attributes.
"""
- nodenames = self.glm.list_owned(locking.LEVEL_NODE)
+ nodenames = self.owned_locks(locking.LEVEL_NODE)
volumes = self.rpc.call_node_volumes(nodenames)
ilist = self.cfg.GetAllInstancesInfo()
"""Computes the list of nodes and their attributes.
"""
- self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ self.nodes = self.owned_locks(locking.LEVEL_NODE)
# Always get name to sort by
if constants.SF_NAME in self.op.output_fields:
# via the node before it's locked, requiring verification later on
lu.needed_locks[locking.LEVEL_NODEGROUP] = \
set(group_uuid
- for instance_name in
- lu.glm.list_owned(locking.LEVEL_INSTANCE)
- for group_uuid in
- lu.cfg.GetInstanceNodeGroups(instance_name))
+ for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+ for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
elif level == locking.LEVEL_NODE:
lu._LockInstancesNodes() # pylint: disable-msg=W0212
@staticmethod
def _CheckGroupLocks(lu):
- owned_instances = frozenset(lu.glm.list_owned(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(lu.glm.list_owned(locking.LEVEL_NODEGROUP))
+ owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
# Check if node groups for locked instances are still correct
for instance_name in owned_instances:
- inst_groups = lu.cfg.GetInstanceNodeGroups(instance_name)
- if not owned_groups.issuperset(inst_groups):
- raise errors.OpPrereqError("Instance %s's node groups changed since"
- " locks were acquired, current groups are"
- " are '%s', owning groups '%s'; retry the"
- " operation" %
- (instance_name,
- utils.CommaJoin(inst_groups),
- utils.CommaJoin(owned_groups)),
- errors.ECODE_STATE)
+ _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
def _GetQueryData(self, lu):
"""Computes the list of instances and their attributes.
if query.IQ_NODES in self.requested_data:
node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
instance_list)))
- nodes = dict((name, lu.cfg.GetNodeInfo(name)) for name in node_names)
+ nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
for uuid in set(map(operator.attrgetter("group"),
nodes.values())))
self.changed_primary_ip = False
- for existing_node_name in node_list:
- existing_node = cfg.GetNodeInfo(existing_node_name)
-
+ for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
if self.op.readd and node == existing_node_name:
if existing_node.secondary_ip != secondary_ip:
raise errors.OpPrereqError("Readded node doesn't have the same IP"
instances_keep = []
# Build list of instances to release
- for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
- instance = self.context.cfg.GetInstanceInfo(instance_name)
+ locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
+ for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
if (instance.disk_template in constants.DTS_INT_MIRROR and
self.op.node_name in instance.all_nodes):
instances_keep.append(instance_name)
_ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
- assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
+ assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
set(instances_keep))
def BuildHooksEnv(self):
# running the iallocator and the actual migration, a good consistency model
# will have to be found.
- assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+ assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
frozenset([self.op.node_name]))
return ResultWithJobs(jobs)
# directly, or through an iallocator.
self.all_nodes = [self.source_node, self.target_node]
- self.nodes_ip = {
- self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
- self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
- }
+ self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
+ in self.cfg.GetMultiNodeInfo(self.all_nodes))
if self.failover:
feedback_fn("Failover instance %s" % self.instance.name)
src_path = self.op.src_path
if src_node is None:
- locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ locked_nodes = self.owned_locks(locking.LEVEL_NODE)
exp_list = self.rpc.call_export_list(locked_nodes)
found = False
for node in exp_list:
disk_abort = not _WaitForSync(self, iobj)
elif iobj.disk_template in constants.DTS_INT_MIRROR:
# make sure the disks are not degraded (still sync-ing is ok)
- time.sleep(15)
feedback_fn("* checking mirrors status")
disk_abort = not _WaitForSync(self, iobj, oneshot=True)
else:
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
if self.op.mode == constants.INSTANCE_CREATE:
if not self.op.no_install:
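+ # For mirrored disk templates, optionally pause disk sync while the
+ # OS is installed; it is resumed after the create scripts finish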
+ pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
+ not self.op.wait_for_sync)
+ if pause_sync:
+ feedback_fn("* pausing disk sync to install instance OS")
+ result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+ iobj.disks, True)
+ for idx, success in enumerate(result.payload):
+ if not success:
+ logging.warn("pause-sync of instance %s for disk %d failed",
+ instance, idx)
+
feedback_fn("* running the instance OS create scripts...")
# FIXME: pass debug option from opcode to backend
result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
self.op.debug_level)
+ if pause_sync:
+ feedback_fn("* resuming disk sync")
+ result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+ iobj.disks, False)
+ for idx, success in enumerate(result.payload):
+ if not success:
+ logging.warn("resume-sync of instance %s for disk %d failed",
+ instance, idx)
+
result.Raise("Could not add os for instance %s"
" on node %s" % (instance, pnode_name))
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = [node_name
- for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_name in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
self.op.iallocator is None)
- owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
- groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
- if owned_groups != groups:
- raise errors.OpExecError("Node groups used by instance '%s' changed"
- " since lock was acquired, current list is %r,"
- " used to be '%s'" %
- (self.op.instance_name,
- utils.CommaJoin(groups),
- utils.CommaJoin(owned_groups)))
+ _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
return LogicalUnit.CheckPrereq(self)
if remote_node is None:
self.remote_node_info = None
else:
- assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
+ assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
"Remote node '%s' is not locked" % remote_node
self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
instance.FindDisk(disk_idx)
# Get secondary node IP addresses
- self.node_secondary_ip = \
- dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
- for node_name in touched_nodes)
+ self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
+ in self.cfg.GetMultiNodeInfo(touched_nodes))
def Exec(self, feedback_fn):
"""Execute disk replacement.
if __debug__:
# Verify owned locks before starting operation
- owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
- assert set(owned_locks) == set(self.node_secondary_ip), \
+ owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
+ assert set(owned_nodes) == set(self.node_secondary_ip), \
("Incorrect node locks, owning %s, expected %s" %
- (owned_locks, self.node_secondary_ip.keys()))
+ (owned_nodes, self.node_secondary_ip.keys()))
- owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
- assert list(owned_locks) == [self.instance_name], \
+ owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
+ assert list(owned_instances) == [self.instance_name], \
"Instance '%s' not locked" % self.instance_name
assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
if __debug__:
# Verify owned locks
- owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
+ owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
nodes = frozenset(self.node_secondary_ip)
- assert ((self.early_release and not owned_locks) or
- (not self.early_release and not (set(owned_locks) - nodes))), \
+ assert ((self.early_release and not owned_nodes) or
+ (not self.early_release and not (set(owned_nodes) - nodes))), \
("Not owning the correct locks, early_release=%s, owned=%r,"
- " nodes=%r" % (self.early_release, owned_locks, nodes))
+ " nodes=%r" % (self.early_release, owned_nodes, nodes))
return result
def CheckPrereq(self):
# Verify locks
- owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
- owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
- owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+ owned_nodes = self.owned_locks(locking.LEVEL_NODE)
+ owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
assert owned_nodes == self.lock_nodes
"""
if self.wanted_names is None:
assert self.op.use_locking, "Locking was not used"
- self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+ self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
- self.wanted_instances = [self.cfg.GetInstanceInfo(name)
- for name in self.wanted_names]
+ self.wanted_instances = \
+ map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
def _ComputeBlockdevStatus(self, node, instance_name, dev):
"""Returns the status of a block device
cluster = self.cfg.GetClusterInfo()
- for instance in self.wanted_instances:
- pnode = self.cfg.GetNodeInfo(instance.primary_node)
-
+ pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
+ for i in self.wanted_instances)
+ for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
if self.op.static or pnode.offline:
remote_state = None
if pnode.offline:
}
+class LUInstanceChangeGroup(LogicalUnit):
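+ """Change an instance's node group.
+
+ """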
+ HPATH = "instance-change-group"
+ HTYPE = constants.HTYPE_INSTANCE
+ REQ_BGL = False
+
+ def ExpandNames(self):
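+ # All locks required by this LU are acquired in shared mode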
+ self.share_locks = _ShareAll()
+ self.needed_locks = {
+ locking.LEVEL_NODEGROUP: [],
+ locking.LEVEL_NODE: [],
+ }
+
+ self._ExpandAndLockInstance()
+
+ if self.op.target_groups:
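+ # Translate the requested target groups (names or UUIDs) into UUIDs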
+ self.req_target_uuids = map(self.cfg.LookupNodeGroup,
+ self.op.target_groups)
+ else:
+ self.req_target_uuids = None
+
+ self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODEGROUP:
+ assert not self.needed_locks[locking.LEVEL_NODEGROUP]
+
+ if self.req_target_uuids:
+ lock_groups = set(self.req_target_uuids)
+
+ # Lock all groups used by instance optimistically; this requires going
+ # via the node before it's locked, requiring verification later on
+ instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+ lock_groups.update(instance_groups)
+ else:
+ # No target groups, need to lock all of them
+ lock_groups = locking.ALL_SET
+
+ self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
+
+ elif level == locking.LEVEL_NODE:
+ if self.req_target_uuids:
+ # Lock all nodes used by instances
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+ self._LockInstancesNodes()
+
+ # Lock all nodes in all potential target groups
+ lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
+ self.cfg.GetInstanceNodeGroups(self.op.instance_name))
+ member_nodes = [node_name
+ for group in lock_groups
+ for node_name in self.cfg.GetNodeGroup(group).members]
+ self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+ else:
+ # Lock all nodes as all groups are potential targets
+ self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+ def CheckPrereq(self):
+ owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+
+ assert (self.req_target_uuids is None or
+ owned_groups.issuperset(self.req_target_uuids))
+ assert owned_instances == set([self.op.instance_name])
+
+ # Get instance information
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+
+ # Check if node groups for locked instance are still correct
+ assert owned_nodes.issuperset(self.instance.all_nodes), \
+ ("Instance %s's nodes changed while we kept the lock" %
+ self.op.instance_name)
+
+ inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+ owned_groups)
+
+ if self.req_target_uuids:
+ # User requested specific target groups
+ self.target_uuids = frozenset(self.req_target_uuids)
+ else:
+ # All groups except those used by the instance are potential targets
+ self.target_uuids = owned_groups - inst_groups
+
+ conflicting_groups = self.target_uuids & inst_groups
+ if conflicting_groups:
+ raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
+ " used by the instance '%s'" %
+ (utils.CommaJoin(conflicting_groups),
+ self.op.instance_name),
+ errors.ECODE_INVAL)
+
+ if not self.target_uuids:
+ raise errors.OpPrereqError("There are no possible target groups",
+ errors.ECODE_INVAL)
+
+ def BuildHooksEnv(self):
+ """Build hooks env.
+
+ """
+ assert self.target_uuids
+
+ env = {
+ "TARGET_GROUPS": " ".join(self.target_uuids),
+ }
+
+ env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+
+ return env
+
+ def BuildHooksNodes(self):
+ """Build hooks nodes.
+
+ """
+ mn = self.cfg.GetMasterNode()
+ return ([mn], [mn])
+
+ def Exec(self, feedback_fn):
+ instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
+
+ assert instances == [self.op.instance_name], "Instance not locked"
+
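+ # Ask the iallocator for a change-group solution; its result is
+ # converted into jobs below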
+ ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
+ instances=instances, target_groups=list(self.target_uuids))
+
+ ial.Run(self.op.iallocator)
+
+ if not ial.success:
+ raise errors.OpPrereqError("Can't compute solution for changing group of"
+ " instance '%s' using iallocator '%s': %s" %
+ (self.op.instance_name, self.op.iallocator,
+ ial.info),
+ errors.ECODE_NORES)
+
+ jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+
+ self.LogInfo("Iallocator returned %s job(s) for changing group of"
+ " instance '%s'", len(jobs), self.op.instance_name)
+
+ return ResultWithJobs(jobs)
+
+
class LUBackupQuery(NoHooksLU):
"""Query the exports list
that node.
"""
- self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ self.nodes = self.owned_locks(locking.LEVEL_NODE)
rpcresult = self.rpc.call_export_list(self.nodes)
result = {}
for node in rpcresult:
fqdn_warn = True
instance_name = self.op.instance_name
- locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ locked_nodes = self.owned_locks(locking.LEVEL_NODE)
exportlist = self.rpc.call_export_list(locked_nodes)
found = False
for node in exportlist:
"""
assert self.needed_locks[locking.LEVEL_NODEGROUP]
- assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+ assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
frozenset(self.op.nodes))
expected_locks = (set([self.group_uuid]) |
self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
- actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
if actual_locks != expected_locks:
raise errors.OpExecError("Nodes changed groups since locks were acquired,"
" current groups are '%s', used to be '%s'" %
utils.CommaJoin(self.req_target_uuids)),
errors.ECODE_INVAL)
- if not self.op.iallocator:
- # Use default iallocator
- self.op.iallocator = self.cfg.GetDefaultIAllocator()
-
- if not self.op.iallocator:
- raise errors.OpPrereqError("No iallocator was specified, neither in the"
- " opcode nor as a cluster-wide default",
- errors.ECODE_INVAL)
+ self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
self.share_locks = _ShareAll()
self.needed_locks = {
# via the node before it's locked, requiring verification later on
lock_groups.update(group_uuid
for instance_name in
- self.glm.list_owned(locking.LEVEL_INSTANCE)
+ self.owned_locks(locking.LEVEL_INSTANCE)
for group_uuid in
self.cfg.GetInstanceNodeGroups(instance_name))
else:
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
self._LockInstancesNodes()
- # Lock all nodes in group to be evacuated
- assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
- member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+ # Lock all nodes in group to be evacuated and target groups
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ assert self.group_uuid in owned_groups
+ member_nodes = [node_name
+ for group in owned_groups
+ for node_name in self.cfg.GetNodeGroup(group).members]
self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
def CheckPrereq(self):
- owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
- owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+ owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
assert owned_groups.issuperset(self.req_target_uuids)
assert self.group_uuid in owned_groups
errors.ECODE_STATE)
# Get instance information
- self.instances = dict((name, self.cfg.GetInstanceInfo(name))
- for name in owned_instances)
+ self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
# Check if node groups for locked instances are still correct
for instance_name in owned_instances:
inst = self.instances[instance_name]
- assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
- "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
assert owned_nodes.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % instance_name
- inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
- if not owned_groups.issuperset(inst_groups):
- raise errors.OpPrereqError("Instance %s's node groups changed since"
- " locks were acquired, current groups"
- " are '%s', owning groups '%s'; retry the"
- " operation" %
- (instance_name,
- utils.CommaJoin(inst_groups),
- utils.CommaJoin(owned_groups)),
- errors.ECODE_STATE)
+ inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
+ owned_groups)
+
+ assert self.group_uuid in inst_groups, \
+ "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
if self.req_target_uuids:
# User requested specific target groups
if group_uuid != self.group_uuid]
if not self.target_uuids:
- raise errors.OpExecError("There are no possible target groups")
+ raise errors.OpPrereqError("There are no possible target groups",
+ errors.ECODE_INVAL)
def BuildHooksEnv(self):
"""Build hooks env.
"""
mn = self.cfg.GetMasterNode()
- assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
return (run_nodes, run_nodes)
def Exec(self, feedback_fn):
- instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))
+ instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
assert self.group_uuid not in self.target_uuids