Revision e1f23243 lib/cmdlib.py
b/lib/cmdlib.py | ||
---|---|---|
9801 | 9801 |
return result |
9802 | 9802 |
|
9803 | 9803 |
|
9804 |
class LUNodeEvacuate(NoHooksLU):
  """Evacuates instances off a list of nodes.

  Exactly one of C{iallocator} and C{remote_node} must be given (checked in
  L{CheckArguments}). With an iallocator any evacuation mode is possible;
  with an explicit remote node only secondary instances can be moved.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Verify that exactly one of "iallocator"/"remote_node" was given."""
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    """Expand node names and declare the lock levels needed.

    @raise errors.OpPrereqError: if the remote node equals the evacuated
      node, or if a remote node was given for a non-secondary-only mode

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    if self.op.remote_node is not None:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      assert self.op.remote_node

      if self.op.remote_node == self.op.node_name:
        raise errors.OpPrereqError("Can not use evacuated node as a new"
                                   " secondary node", errors.ECODE_INVAL)

      # Without an iallocator only moving secondaries to the explicitly
      # given node is supported
      if self.op.mode != constants.IALLOCATOR_NEVAC_SEC:
        raise errors.OpPrereqError("Without the use of an iallocator only"
                                   " secondary instances can be evacuated",
                                   errors.ECODE_INVAL)

    # Declare locks; all levels are acquired in shared mode
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    if self.op.remote_node is None:
      # Iallocator will choose any node(s) in the same group
      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
    else:
      group_nodes = frozenset([self.op.remote_node])

    # Determine nodes to be locked
    self.lock_nodes = set([self.op.node_name]) | group_nodes

  def _DetermineInstances(self):
    """Builds list of instances to operate on.

    Selects the instance-lookup function matching C{self.op.mode} (primary
    only, secondary only, or all instances on the evacuated node).

    """
    assert self.op.mode in constants.IALLOCATOR_NEVAC_MODES

    if self.op.mode == constants.IALLOCATOR_NEVAC_PRI:
      # Primary instances only
      inst_fn = _GetNodePrimaryInstances
      assert self.op.remote_node is None, \
        "Evacuating primary instances requires iallocator"
    elif self.op.mode == constants.IALLOCATOR_NEVAC_SEC:
      # Secondary instances only
      inst_fn = _GetNodeSecondaryInstances
    else:
      # All instances
      assert self.op.mode == constants.IALLOCATOR_NEVAC_ALL
      inst_fn = _GetNodeInstances

    return inst_fn(self.cfg, self.op.node_name)

  def DeclareLocks(self, level):
    """Fill in the lock names for each level as it is reached."""
    if level == locking.LEVEL_INSTANCE:
      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        set(i.name for i in self._DetermineInstances())

    elif level == locking.LEVEL_NODEGROUP:
      # Lock node groups optimistically, needs verification once nodes have
      # been acquired
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)

    elif level == locking.LEVEL_NODE:
      self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes

  def CheckPrereq(self):
    """Re-check the optimistically acquired locks against current config.

    Verifies that the node groups and the instance list on the evacuated
    node did not change between lock declaration and acquisition, records
    the affected instances, and rejects a remote node that is primary for
    one of them.

    """
    # Verify locks
    owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
    owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)

    # NOTE(review): assumes list_owned returns a set comparable with
    # self.lock_nodes — confirm against the lock manager implementation
    assert owned_nodes == self.lock_nodes

    wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
    if owned_groups != wanted_groups:
      raise errors.OpExecError("Node groups changed since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(wanted_groups),
                                utils.CommaJoin(owned_groups)))

    # Determine affected instances
    self.instances = self._DetermineInstances()
    self.instance_names = [i.name for i in self.instances]

    if set(self.instance_names) != owned_instances:
      raise errors.OpExecError("Instances on node '%s' changed since locks"
                               " were acquired, current instances are '%s',"
                               " used to be '%s'" %
                               (self.op.node_name,
                                utils.CommaJoin(self.instance_names),
                                utils.CommaJoin(owned_instances)))

    if self.instance_names:
      self.LogInfo("Evacuating instances from node '%s': %s",
                   self.op.node_name,
                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
    else:
      self.LogInfo("No instances to evacuate from node '%s'",
                   self.op.node_name)

    if self.op.remote_node is not None:
      # The new secondary must not already be primary for any affected
      # instance
      for i in self.instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Build and return the evacuation job sets.

    Either asks the iallocator for a node-evacuate plan and loads the
    resulting opcodes, or (for an explicit remote node) generates one
    replace-disks job per instance.

    @return: L{ResultWithJobs} wrapping a list of job definitions (each a
      list of opcodes)

    """
    assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)

    if not self.instance_names:
      # No instances to evacuate
      jobs = []

    elif self.op.iallocator is not None:
      # TODO: Implement relocation to other group
      ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
                       evac_mode=self.op.mode,
                       instances=list(self.instance_names))

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute node evacuation using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      # ial.result is a list of job sets, each job set a list of serialized
      # opcode states
      jobs = [[opcodes.OpCode.LoadOpCode(state) for state in jobset]
              for jobset in ial.result]

      # Set "early_release" flag on opcodes where available
      early_release = self.op.early_release
      for op in itertools.chain(*jobs): # pylint: disable-msg=W0142
        try:
          op.early_release = early_release
        except AttributeError:
          # Opcodes without the attribute reject the assignment; replace-disks
          # opcodes are expected to support it — presumably opcode attributes
          # are slot-restricted, TODO confirm
          assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

    elif self.op.remote_node is not None:
      assert self.op.mode == constants.IALLOCATOR_NEVAC_SEC
      # One single-opcode job per instance: change the secondary to the
      # given remote node
      jobs = [
        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
                                        remote_node=self.op.remote_node,
                                        disks=[],
                                        mode=constants.REPLACE_DISK_CHG,
                                        early_release=self.op.early_release)]
        for instance_name in self.instance_names
        ]

    else:
      raise errors.ProgrammerError("No iallocator or remote node")

    return ResultWithJobs(jobs)
|
9974 |
|
|
9975 |
|
|
9804 | 9976 |
class LUInstanceGrowDisk(LogicalUnit): |
9805 | 9977 |
"""Grow a disk of an instance. |
9806 | 9978 |
|
... | ... | |
12502 | 12674 |
else: |
12503 | 12675 |
raise errors.ProgrammerError("Unhandled mode '%s'" % self.mode) |
12504 | 12676 |
|
12677 |
elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC: |
|
12678 |
assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES |
|
12679 |
|
|
12505 | 12680 |
self.out_data = rdict |
12506 | 12681 |
|
12507 | 12682 |
@staticmethod |
Also available in: Unified diff