Revision b7a1c816 — diff of lib/cmdlib.py (old line numbers on the left, new on the right)
def ExpandNames(self):
  """Expand the node name and compute the locks needed for migration.

  Builds one L{TLMigrateInstance} tasklet per primary instance on the
  node.  If any instance uses an externally mirrored disk template, the
  iallocator will pick destination nodes later, so every node lock must
  be acquired up front; otherwise only this node is locked and the
  remaining node locks are appended per instance in L{DeclareLocks}.

  """
  self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
  self.needed_locks = {}

  # One migration tasklet per primary instance on this node
  instance_names = []
  tasklets = []
  self.lock_all_nodes = False

  for instance in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
    logging.debug("Migrating instance %s", instance.name)
    instance_names.append(instance.name)
    tasklets.append(TLMigrateInstance(self, instance.name, cleanup=False))

    if instance.disk_template in constants.DTS_EXT_MIRROR:
      # We need to lock all nodes, as the iallocator will choose the
      # destination nodes afterwards
      self.lock_all_nodes = True

  self.tasklets = tasklets

  # Declare node locks: either everything, or just this node plus
  # whatever _LockInstancesNodes adds later
  if self.lock_all_nodes:
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
  else:
    self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  # Declare instance locks
  self.needed_locks[locking.LEVEL_INSTANCE] = instance_names
|
6686 |
|
|
6687 |
def DeclareLocks(self, level):
  """Expand the node-level locks from the owned instance locks.

  Skipped entirely when all node locks were already requested in
  L{ExpandNames} (externally mirrored disk templates).

  """
  if self.lock_all_nodes or level != locking.LEVEL_NODE:
    return
  self._LockInstancesNodes()
|
6656 |
# Acquire every lock level in shared mode; the actual migrations run as
# separate jobs, so this LU only reads cluster state.
self.share_locks = dict.fromkeys(locking.LEVELS, 1)
self.needed_locks = {
  locking.LEVEL_NODE: [self.op.node_name],
}
|
6690 | 6660 |
|
6691 | 6661 |
def BuildHooksEnv(self): |
6692 | 6662 |
"""Build hooks env. |
... | ... | |
6705 | 6675 |
nl = [self.cfg.GetMasterNode()] |
6706 | 6676 |
return (nl, nl) |
6707 | 6677 |
|
6678 |
def CheckPrereq(self):
  """Check prerequisites.

  Nothing is verified here; the per-instance checks happen in the
  migration jobs submitted by L{Exec}.

  """
|
6680 |
|
|
6681 |
def Exec(self, feedback_fn):
  """Submit one migration job per primary instance on the node.

  Each job contains a single L{opcodes.OpInstanceMigrate} that inherits
  this opcode's mode, liveness, iallocator and target-node settings.

  @return: a L{ResultWithJobs} wrapping the job definitions

  """
  # Prepare jobs for migration instances
  jobs = []
  for instance in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
    migrate_op = opcodes.OpInstanceMigrate(instance_name=instance.name,
                                           mode=self.op.mode,
                                           live=self.op.live,
                                           iallocator=self.op.iallocator,
                                           target_node=self.op.target_node)
    jobs.append([migrate_op])

  # TODO: Run iallocator in this opcode and pass correct placement options to
  # OpInstanceMigrate. Since other jobs can modify the cluster between
  # running the iallocator and the actual migration, a good consistency model
  # will have to be found.

  # Only the lock on the node being evacuated should be held at this point
  assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
          frozenset([self.op.node_name]))

  return ResultWithJobs(jobs)
|
6701 |
|
|
6708 | 6702 |
|
6709 | 6703 |
class TLMigrateInstance(Tasklet): |
6710 | 6704 |
"""Tasklet class for instance migration. |
Also available in: Unified diff