@@ -63,7 +63,9 @@
   _ComputeNewInstanceViolations, _GetUpdatedParams, _CheckOSParams, \
   _CheckHVParams, _AdjustCandidatePool, _CheckNodePVs, \
   _ComputeIPolicyInstanceViolation, _AnnotateDiskParams, _SupportsOob, \
-  _ComputeIPolicySpecViolation
+  _ComputeIPolicySpecViolation, _GetDefaultIAllocator, \
+  _CheckInstancesNodeGroups, _LoadNodeEvacResult, _MapInstanceDisksToNodes, \
+  _CheckInstanceNodeGroups
 
 from ganeti.cmdlib.cluster import LUClusterActivateMasterIp, \
   LUClusterDeactivateMasterIp, LUClusterConfigQuery, LUClusterDestroy, \
@@ -71,6 +73,9 @@
   LUClusterRename, LUClusterRepairDiskSizes, LUClusterSetParams, \
   LUClusterVerify, LUClusterVerifyConfig, LUClusterVerifyGroup, \
   LUClusterVerifyDisks
+from ganeti.cmdlib.group import LUGroupAdd, LUGroupAssignNodes, \
+  _GroupQuery, LUGroupQuery, LUGroupSetParams, LUGroupRemove, \
+  LUGroupRename, LUGroupEvacuate, LUGroupVerifyDisks
 from ganeti.cmdlib.tags import LUTagsGet, LUTagsSearch, LUTagsSet, LUTagsDel
 from ganeti.cmdlib.network import LUNetworkAdd, LUNetworkRemove, \
   LUNetworkSetParams, _NetworkQuery, LUNetworkQuery, LUNetworkConnect, \
@@ -91,61 +96,6 @@
   ]))
 
 
-def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
-                              cur_group_uuid):
-  """Checks if node groups for locked instances are still correct.
-
-  @type cfg: L{config.ConfigWriter}
-  @param cfg: Cluster configuration
-  @type instances: dict; string as key, L{objects.Instance} as value
-  @param instances: Dictionary, instance name as key, instance object as value
-  @type owned_groups: iterable of string
-  @param owned_groups: List of owned groups
-  @type owned_nodes: iterable of string
-  @param owned_nodes: List of owned nodes
-  @type cur_group_uuid: string or None
-  @param cur_group_uuid: Optional group UUID to check against instance's groups
-
-  """
-  for (name, inst) in instances.items():
-    assert owned_nodes.issuperset(inst.all_nodes), \
-      "Instance %s's nodes changed while we kept the lock" % name
-
-    inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
-
-    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
-      "Instance %s has no node in group %s" % (name, cur_group_uuid)
-
-
-def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
-                             primary_only=False):
-  """Checks if the owned node groups are still correct for an instance.
-
-  @type cfg: L{config.ConfigWriter}
-  @param cfg: The cluster configuration
-  @type instance_name: string
-  @param instance_name: Instance name
-  @type owned_groups: set or frozenset
-  @param owned_groups: List of currently owned node groups
-  @type primary_only: boolean
-  @param primary_only: Whether to check node groups for only the primary node
-
-  """
-  inst_groups = cfg.GetInstanceNodeGroups(instance_name, primary_only)
-
-  if not owned_groups.issuperset(inst_groups):
-    raise errors.OpPrereqError("Instance %s's node groups changed since"
-                               " locks were acquired, current groups are"
-                               " '%s', owning groups '%s'; retry the"
-                               " operation" %
-                               (instance_name,
-                                utils.CommaJoin(inst_groups),
-                                utils.CommaJoin(owned_groups)),
-                               errors.ECODE_STATE)
-
-  return inst_groups
-
-
 def _IsExclusiveStorageEnabledNode(cfg, node):
   """Whether exclusive_storage is in effect for the given node.
 
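Reviewer note: the two helpers moved above are the verification half of cmdlib's optimistic-locking pattern; locks are declared from possibly stale configuration data and re-checked once actually held. The essential check is a set-superset test. A self-contained sketch with stub UUIDs (not real ConfigWriter data):

    # Core of _CheckInstanceNodeGroups, stubbed out for illustration:
    owned_groups = frozenset(["uuid-g1", "uuid-g2"])  # group locks we hold
    inst_groups = ["uuid-g1", "uuid-g3"]  # instance's groups per current config
    if not owned_groups.issuperset(inst_groups):
      # cmdlib raises errors.OpPrereqError(..., errors.ECODE_STATE) here,
      # telling the client to retry the operation with fresh locks
      raise RuntimeError("instance changed node groups; locks are stale")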
@@ -241,19 +191,6 @@
     assert not lu.glm.is_owned(level), "No locks should be owned"
 
 
-def _MapInstanceDisksToNodes(instances):
-  """Creates a map from (node, volume) to instance name.
-
-  @type instances: list of L{objects.Instance}
-  @rtype: dict; tuple of (node name, volume name) as key, instance name as value
-
-  """
-  return dict(((node, vol), inst.name)
-              for inst in instances
-              for (node, vols) in inst.MapLVsByNode().items()
-              for vol in vols)
-
-
 def _CheckOutputFields(static, dynamic, selected):
   """Checks whether all selected fields are valid.
 
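For reference, the nested comprehension in _MapInstanceDisksToNodes builds a reverse index from (node, volume) pairs to instance names. A self-contained illustration, with a stub standing in for objects.Instance (all names invented):

    class _FakeInst(object):  # stub exposing the same MapLVsByNode() shape
      def __init__(self, name, lvs_by_node):
        self.name = name
        self._lvs = lvs_by_node
      def MapLVsByNode(self):
        return self._lvs

    inst = _FakeInst("web1", {"node1": ["xenvg/d0"], "node2": ["xenvg/d0"]})
    # _MapInstanceDisksToNodes([inst]) ==
    #   {("node1", "xenvg/d0"): "web1", ("node2", "xenvg/d0"): "web1"}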
@@ -836,29 +773,6 @@
                                  " iallocator", errors.ECODE_INVAL)
 
 
-def _GetDefaultIAllocator(cfg, ialloc):
-  """Decides on which iallocator to use.
-
-  @type cfg: L{config.ConfigWriter}
-  @param cfg: Cluster configuration object
-  @type ialloc: string or None
-  @param ialloc: Iallocator specified in opcode
-  @rtype: string
-  @return: Iallocator name
-
-  """
-  if not ialloc:
-    # Use default iallocator
-    ialloc = cfg.GetDefaultIAllocator()
-
-  if not ialloc:
-    raise errors.OpPrereqError("No iallocator was specified, neither in the"
-                               " opcode nor as a cluster-wide default",
-                               errors.ECODE_INVAL)
-
-  return ialloc
-
-
 def _CheckHostnameSane(lu, name):
   """Ensures that a given hostname resolves to a 'sane' name.
 
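_GetDefaultIAllocator is a two-step fallback: the iallocator named in the opcode wins, then the cluster-wide default, and only if both are unset does it raise ECODE_INVAL. The intended call pattern, as LUGroupEvacuate.ExpandNames does later in this diff, is to normalize the opcode field in place:

    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)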
@@ -880,124 +794,6 @@
   return hostname
 
 
-class LUGroupVerifyDisks(NoHooksLU):
-  """Verifies the status of all disks in a node group.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    # Raises errors.OpPrereqError on its own if group can't be found
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-
-    self.share_locks = _ShareAll()
-    self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
-      locking.LEVEL_NODEGROUP: [],
-      locking.LEVEL_NODE: [],
-
-      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
-      # starts one instance of this opcode for every group, which means all
-      # nodes will be locked for a short amount of time, so it's better to
-      # acquire the node allocation lock as well.
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-      }
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once node and group
-      # locks have been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
-
-    elif level == locking.LEVEL_NODEGROUP:
-      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
-
-      self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        set([self.group_uuid] +
-            # Lock all groups used by instances optimistically; this requires
-            # going via the node before it's locked, requiring verification
-            # later on
-            [group_uuid
-             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
-
-    elif level == locking.LEVEL_NODE:
-      # This will only lock the nodes in the group to be verified which contain
-      # actual instances
-      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
-      self._LockInstancesNodes()
-
-      # Lock all nodes in group to be verified
-      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
-      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
-
-  def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
-
-    assert self.group_uuid in owned_groups
-
-    # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
-
-    # Get instance information
-    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
-
-    # Check if node groups for locked instances are still correct
-    _CheckInstancesNodeGroups(self.cfg, self.instances,
-                              owned_groups, owned_nodes, self.group_uuid)
-
-  def Exec(self, feedback_fn):
-    """Verify integrity of cluster disks.
-
-    @rtype: tuple of three items
-    @return: a tuple of (dict of node-to-node_error, list of instances
-        which need activate-disks, dict of instance: (node, volume) for
-        missing volumes)
-
-    """
-    res_nodes = {}
-    res_instances = set()
-    res_missing = {}
-
-    nv_dict = _MapInstanceDisksToNodes(
-      [inst for inst in self.instances.values()
-       if inst.admin_state == constants.ADMINST_UP])
-
-    if nv_dict:
-      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
-                             set(self.cfg.GetVmCapableNodeList()))
-
-      node_lvs = self.rpc.call_lv_list(nodes, [])
-
-      for (node, node_res) in node_lvs.items():
-        if node_res.offline:
-          continue
-
-        msg = node_res.fail_msg
-        if msg:
-          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
-          res_nodes[node] = msg
-          continue
-
-        for lv_name, (_, _, lv_online) in node_res.payload.items():
-          inst = nv_dict.pop((node, lv_name), None)
-          if not (lv_online or inst is None):
-            res_instances.add(inst)
-
-      # any leftover items in nv_dict are missing LVs, let's arrange the data
-      # better
-      for key, inst in nv_dict.iteritems():
-        res_missing.setdefault(inst, []).append(list(key))
-
-    return (res_nodes, list(res_instances), res_missing)
-
-
 def _WaitForSync(lu, instance, disks=None, oneshot=False):
   """Sleep and poll for an instance's disk to sync.
 
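A note on the Exec() docstring above: the result is a plain triple that the cluster-level caller can merge per node group. A hedged, self-contained sketch of a consumer (variable names and messages are illustrative, not actual Ganeti code):

    import logging
    feedback = logging.info  # stand-in for an LU's feedback_fn callback

    # Shape per the docstring; values invented for the example:
    lu_result = ({"node1": "rpc timeout"},          # node -> error message
                 ["inst2"],                         # need activate-disks
                 {"inst3": [["node2", "xenvg/d1"]]})  # missing volumes
    (node_errors, need_activate, missing) = lu_result
    for node, msg in node_errors.items():
      feedback("error enumerating LVs on %s: %s" % (node, msg))
    for inst in need_activate:
      feedback("%s has offline LVs; run gnt-instance activate-disks" % inst)
    for inst, keys in missing.items():
      feedback("%s is missing volumes: %s" % (inst, keys))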
@@ -8607,63 +8403,6 @@
     return ResultWithJobs(jobs)
 
 
-def _SetOpEarlyRelease(early_release, op):
-  """Sets C{early_release} flag on opcodes if available.
-
-  """
-  try:
-    op.early_release = early_release
-  except AttributeError:
-    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
-
-  return op
-
-
-def _NodeEvacDest(use_nodes, group, nodes):
-  """Returns group or nodes depending on caller's choice.
-
-  """
-  if use_nodes:
-    return utils.CommaJoin(nodes)
-  else:
-    return group
-
-
-def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
-  """Unpacks the result of change-group and node-evacuate iallocator requests.
-
-  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
-  L{constants.IALLOCATOR_MODE_CHG_GROUP}.
-
-  @type lu: L{LogicalUnit}
-  @param lu: Logical unit instance
-  @type alloc_result: tuple/list
-  @param alloc_result: Result from iallocator
-  @type early_release: bool
-  @param early_release: Whether to release locks early if possible
-  @type use_nodes: bool
-  @param use_nodes: Whether to display node names instead of groups
-
-  """
-  (moved, failed, jobs) = alloc_result
-
-  if failed:
-    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
-                                 for (name, reason) in failed)
-    lu.LogWarning("Unable to evacuate instances %s", failreason)
-    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
-
-  if moved:
-    lu.LogInfo("Instances to be moved: %s",
-               utils.CommaJoin("%s (to %s)" %
-                               (name, _NodeEvacDest(use_nodes, group, nodes))
-                               for (name, group, nodes) in moved))
-
-  return [map(compat.partial(_SetOpEarlyRelease, early_release),
-              map(opcodes.OpCode.LoadOpCode, ops))
-          for ops in jobs]
-
-
 def _DiskSizeInBytesToMebibytes(lu, size):
   """Converts a disk size in bytes to mebibytes.
 
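For reviewers unfamiliar with the iallocator protocol: _LoadNodeEvacResult expects alloc_result as a (moved, failed, jobs) triple, where jobs is a list of per-job opcode lists that get re-hydrated via opcodes.OpCode.LoadOpCode. A self-contained illustration of the shape (all values invented):

    moved = [("inst1", "uuid-target-group", ["nodeA", "nodeB"])]
    failed = []  # (instance_name, reason) pairs; any entry aborts the LU
    jobs = [     # one inner list of serialized opcodes per submitted job
      [{"OP_ID": "OP_INSTANCE_MIGRATE"}],
    ]
    alloc_result = (moved, failed, jobs)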
@@ -11087,788 +10826,6 @@
                                  " Domain Name.")
 
 
-class LUGroupAdd(LogicalUnit):
-  """Logical unit for creating node groups.
-
-  """
-  HPATH = "group-add"
-  HTYPE = constants.HTYPE_GROUP
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    # We need the new group's UUID here so that we can create and acquire the
-    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
-    # that it should not check whether the UUID exists in the configuration.
-    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
-    self.needed_locks = {}
-    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the given group name is not an existing node group
-    already.
-
-    """
-    try:
-      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-    except errors.OpPrereqError:
-      pass
-    else:
-      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
-                                 " node group (UUID: %s)" %
-                                 (self.op.group_name, existing_uuid),
-                                 errors.ECODE_EXISTS)
-
-    if self.op.ndparams:
-      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
-
-    if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
-    else:
-      self.new_hv_state = None
-
-    if self.op.disk_state:
-      self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
-    else:
-      self.new_disk_state = None
-
-    if self.op.diskparams:
-      for templ in constants.DISK_TEMPLATES:
-        if templ in self.op.diskparams:
-          utils.ForceDictType(self.op.diskparams[templ],
-                              constants.DISK_DT_TYPES)
-      self.new_diskparams = self.op.diskparams
-      try:
-        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
-      except errors.OpPrereqError, err:
-        raise errors.OpPrereqError("While verifying diskparams: %s" % err,
-                                   errors.ECODE_INVAL)
-    else:
-      self.new_diskparams = {}
-
-    if self.op.ipolicy:
-      cluster = self.cfg.GetClusterInfo()
-      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
-      try:
-        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
-      except errors.ConfigurationError, err:
-        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
-                                   errors.ECODE_INVAL)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "GROUP_NAME": self.op.group_name,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    mn = self.cfg.GetMasterNode()
-    return ([mn], [mn])
-
-  def Exec(self, feedback_fn):
-    """Add the node group to the cluster.
-
-    """
-    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
-                                  uuid=self.group_uuid,
-                                  alloc_policy=self.op.alloc_policy,
-                                  ndparams=self.op.ndparams,
-                                  diskparams=self.new_diskparams,
-                                  ipolicy=self.op.ipolicy,
-                                  hv_state_static=self.new_hv_state,
-                                  disk_state_static=self.new_disk_state)
-
-    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
-    del self.remove_locks[locking.LEVEL_NODEGROUP]
-
-
-class LUGroupAssignNodes(NoHooksLU):
-  """Logical unit for assigning nodes to groups.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    # These raise errors.OpPrereqError on their own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
-
-    # We want to lock all the affected nodes and groups. We have readily
-    # available the list of nodes, and the *destination* group. To gather the
-    # list of "source" groups, we need to fetch node information later on.
-    self.needed_locks = {
-      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
-      locking.LEVEL_NODE: self.op.nodes,
-      }
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODEGROUP:
-      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
-
-      # Try to get all affected nodes' groups without having the group or node
-      # lock yet. Needs verification later in the code flow.
-      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
-
-      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    """
-    assert self.needed_locks[locking.LEVEL_NODEGROUP]
-    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
-            frozenset(self.op.nodes))
-
-    expected_locks = (set([self.group_uuid]) |
-                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
-    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
-    if actual_locks != expected_locks:
-      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
-                               " current groups are '%s', used to be '%s'" %
-                               (utils.CommaJoin(expected_locks),
-                                utils.CommaJoin(actual_locks)))
-
-    self.node_data = self.cfg.GetAllNodesInfo()
-    self.group = self.cfg.GetNodeGroup(self.group_uuid)
-    instance_data = self.cfg.GetAllInstancesInfo()
-
-    if self.group is None:
-      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
-                               (self.op.group_name, self.group_uuid))
-
-    (new_splits, previous_splits) = \
-      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
-                                             for node in self.op.nodes],
-                                            self.node_data, instance_data)
-
-    if new_splits:
-      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
-
-      if not self.op.force:
-        raise errors.OpExecError("The following instances get split by this"
-                                 " change and --force was not given: %s" %
-                                 fmt_new_splits)
-      else:
-        self.LogWarning("This operation will split the following instances: %s",
-                        fmt_new_splits)
-
-    if previous_splits:
-      self.LogWarning("In addition, these already-split instances continue"
-                      " to be split across groups: %s",
-                      utils.CommaJoin(utils.NiceSort(previous_splits)))
-
-  def Exec(self, feedback_fn):
-    """Assign nodes to a new group.
-
-    """
-    mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
-
-    self.cfg.AssignGroupNodes(mods)
-
-  @staticmethod
-  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
-    """Check for split instances after a node assignment.
-
-    This method considers a series of node assignments as an atomic operation,
-    and returns information about split instances after applying the set of
-    changes.
-
-    In particular, it returns information about newly split instances, and
-    instances that were already split, and remain so after the change.
-
-    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
-    considered.
-
-    @type changes: list of (node_name, new_group_uuid) pairs.
-    @param changes: list of node assignments to consider.
-    @param node_data: a dict with data for all nodes
-    @param instance_data: a dict with all instances to consider
-    @rtype: a two-tuple
-    @return: a list of instances that were previously okay and would be split
-      as a consequence of this change, and a list of instances that were
-      previously split and this change does not fix.
-
-    """
-    changed_nodes = dict((node, group) for node, group in changes
-                         if node_data[node].group != group)
-
-    all_split_instances = set()
-    previously_split_instances = set()
-
-    def InstanceNodes(instance):
-      return [instance.primary_node] + list(instance.secondary_nodes)
-
-    for inst in instance_data.values():
-      if inst.disk_template not in constants.DTS_INT_MIRROR:
-        continue
-
-      instance_nodes = InstanceNodes(inst)
-
-      if len(set(node_data[node].group for node in instance_nodes)) > 1:
-        previously_split_instances.add(inst.name)
-
-      if len(set(changed_nodes.get(node, node_data[node].group)
-                 for node in instance_nodes)) > 1:
-        all_split_instances.add(inst.name)
-
-    return (list(all_split_instances - previously_split_instances),
-            list(previously_split_instances & all_split_instances))
-
-
-class _GroupQuery(_QueryBase):
-  FIELDS = query.GROUP_FIELDS
-
-  def ExpandNames(self, lu):
-    lu.needed_locks = {}
-
-    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
-    self._cluster = lu.cfg.GetClusterInfo()
-    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
-
-    if not self.names:
-      self.wanted = [name_to_uuid[name]
-                     for name in utils.NiceSort(name_to_uuid.keys())]
-    else:
-      # Accept names as either group names or UUIDs.
-      missing = []
-      self.wanted = []
-      all_uuid = frozenset(self._all_groups.keys())
-
-      for name in self.names:
-        if name in all_uuid:
-          self.wanted.append(name)
-        elif name in name_to_uuid:
-          self.wanted.append(name_to_uuid[name])
-        else:
-          missing.append(name)
-
-      if missing:
-        raise errors.OpPrereqError("Some groups do not exist: %s" %
-                                   utils.CommaJoin(missing),
-                                   errors.ECODE_NOENT)
-
-  def DeclareLocks(self, lu, level):
-    pass
-
-  def _GetQueryData(self, lu):
-    """Computes the list of node groups and their attributes.
-
-    """
-    do_nodes = query.GQ_NODE in self.requested_data
-    do_instances = query.GQ_INST in self.requested_data
-
-    group_to_nodes = None
-    group_to_instances = None
-
-    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
-    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for
-    # the latter GetAllInstancesInfo() is not enough, as we have to go through
-    # instance->node. Hence, we will need to process nodes even if we only need
-    # instance information.
-    if do_nodes or do_instances:
-      all_nodes = lu.cfg.GetAllNodesInfo()
-      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
-      node_to_group = {}
-
-      for node in all_nodes.values():
-        if node.group in group_to_nodes:
-          group_to_nodes[node.group].append(node.name)
-          node_to_group[node.name] = node.group
-
-      if do_instances:
-        all_instances = lu.cfg.GetAllInstancesInfo()
-        group_to_instances = dict((uuid, []) for uuid in self.wanted)
-
-        for instance in all_instances.values():
-          node = instance.primary_node
-          if node in node_to_group:
-            group_to_instances[node_to_group[node]].append(instance.name)
-
-        if not do_nodes:
-          # Do not pass on node information if it was not requested.
-          group_to_nodes = None
-
-    return query.GroupQueryData(self._cluster,
-                                [self._all_groups[uuid]
-                                 for uuid in self.wanted],
-                                group_to_nodes, group_to_instances,
-                                query.GQ_DISKPARAMS in self.requested_data)
-
-
-class LUGroupQuery(NoHooksLU):
-  """Logical unit for querying node groups.
-
-  """
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                          self.op.output_fields, False)
-
-  def ExpandNames(self):
-    self.gq.ExpandNames(self)
-
-  def DeclareLocks(self, level):
-    self.gq.DeclareLocks(self, level)
-
-  def Exec(self, feedback_fn):
-    return self.gq.OldStyleQuery(self)
-
-
-class LUGroupSetParams(LogicalUnit):
-  """Modifies the parameters of a node group.
-
-  """
-  HPATH = "group-modify"
-  HTYPE = constants.HTYPE_GROUP
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    all_changes = [
-      self.op.ndparams,
-      self.op.diskparams,
-      self.op.alloc_policy,
-      self.op.hv_state,
-      self.op.disk_state,
-      self.op.ipolicy,
-      ]
-
-    if all_changes.count(None) == len(all_changes):
-      raise errors.OpPrereqError("Please pass at least one modification",
-                                 errors.ECODE_INVAL)
-
-  def ExpandNames(self):
-    # This raises errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-
-    self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
-      locking.LEVEL_NODEGROUP: [self.group_uuid],
-      }
-
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
-
-  @staticmethod
-  def _UpdateAndVerifyDiskParams(old, new):
-    """Updates and verifies disk parameters.
-
-    """
-    new_params = _GetUpdatedParams(old, new)
-    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
-    return new_params
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    """
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-
-    # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
-
-    self.group = self.cfg.GetNodeGroup(self.group_uuid)
-    cluster = self.cfg.GetClusterInfo()
-
-    if self.group is None:
-      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
-                               (self.op.group_name, self.group_uuid))
-
-    if self.op.ndparams:
-      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
-      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
-      self.new_ndparams = new_ndparams
-
-    if self.op.diskparams:
-      diskparams = self.group.diskparams
-      uavdp = self._UpdateAndVerifyDiskParams
-      # For each disk template subdict, update and verify the values
-      new_diskparams = dict((dt,
-                             uavdp(diskparams.get(dt, {}),
-                                   self.op.diskparams[dt]))
-                            for dt in constants.DISK_TEMPLATES
-                            if dt in self.op.diskparams)
-      # Now that all subdicts of diskparams are ready, merge the updated
-      # subdicts into the actual dict
-      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
-      try:
-        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
-      except errors.OpPrereqError, err:
-        raise errors.OpPrereqError("While verifying diskparams: %s" % err,
-                                   errors.ECODE_INVAL)
-
-    if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
-                                                 self.group.hv_state_static)
-
-    if self.op.disk_state:
-      self.new_disk_state = \
-        _MergeAndVerifyDiskState(self.op.disk_state,
-                                 self.group.disk_state_static)
-
-    if self.op.ipolicy:
-      self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
-                                            self.op.ipolicy,
-                                            group_policy=True)
-
-      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
-      inst_filter = lambda inst: inst.name in owned_instances
-      instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
-      gmi = ganeti.masterd.instance
-      violations = \
-        _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
-                                                                self.group),
-                                      new_ipolicy, instances, self.cfg)
-
-      if violations:
-        self.LogWarning("After the ipolicy change the following instances"
-                        " violate it: %s",
-                        utils.CommaJoin(violations))
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "GROUP_NAME": self.op.group_name,
-      "NEW_ALLOC_POLICY": self.op.alloc_policy,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    mn = self.cfg.GetMasterNode()
-    return ([mn], [mn])
-
-  def Exec(self, feedback_fn):
-    """Modifies the node group.
-
-    """
-    result = []
-
-    if self.op.ndparams:
-      self.group.ndparams = self.new_ndparams
-      result.append(("ndparams", str(self.group.ndparams)))
-
-    if self.op.diskparams:
-      self.group.diskparams = self.new_diskparams
-      result.append(("diskparams", str(self.group.diskparams)))
-
-    if self.op.alloc_policy:
-      self.group.alloc_policy = self.op.alloc_policy
-
-    if self.op.hv_state:
-      self.group.hv_state_static = self.new_hv_state
-
-    if self.op.disk_state:
-      self.group.disk_state_static = self.new_disk_state
-
-    if self.op.ipolicy:
-      self.group.ipolicy = self.new_ipolicy
-
-    self.cfg.Update(self.group, feedback_fn)
-    return result
-
-
-class LUGroupRemove(LogicalUnit):
-  HPATH = "group-remove"
-  HTYPE = constants.HTYPE_GROUP
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    # This will raise errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-    self.needed_locks = {
-      locking.LEVEL_NODEGROUP: [self.group_uuid],
-      }
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the given group name exists as a node group, that it is
-    empty (i.e., contains no nodes), and that it is not the last group of the
-    cluster.
-
-    """
-    # Verify that the group is empty.
-    group_nodes = [node.name
-                   for node in self.cfg.GetAllNodesInfo().values()
-                   if node.group == self.group_uuid]
-
-    if group_nodes:
-      raise errors.OpPrereqError("Group '%s' not empty, has the following"
-                                 " nodes: %s" %
-                                 (self.op.group_name,
-                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
-                                 errors.ECODE_STATE)
-
-    # Verify the cluster would not be left group-less.
-    if len(self.cfg.GetNodeGroupList()) == 1:
-      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
-                                 " removed" % self.op.group_name,
-                                 errors.ECODE_STATE)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "GROUP_NAME": self.op.group_name,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    mn = self.cfg.GetMasterNode()
-    return ([mn], [mn])
-
-  def Exec(self, feedback_fn):
-    """Remove the node group.
-
-    """
-    try:
-      self.cfg.RemoveNodeGroup(self.group_uuid)
-    except errors.ConfigurationError:
-      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
-                               (self.op.group_name, self.group_uuid))
-
-    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
-
-
-class LUGroupRename(LogicalUnit):
-  HPATH = "group-rename"
-  HTYPE = constants.HTYPE_GROUP
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    # This raises errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-
-    self.needed_locks = {
-      locking.LEVEL_NODEGROUP: [self.group_uuid],
-      }
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    Ensures requested new name is not yet used.
-
-    """
-    try:
-      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
-    except errors.OpPrereqError:
-      pass
-    else:
-      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
-                                 " node group (UUID: %s)" %
-                                 (self.op.new_name, new_name_uuid),
-                                 errors.ECODE_EXISTS)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OLD_NAME": self.op.group_name,
-      "NEW_NAME": self.op.new_name,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    mn = self.cfg.GetMasterNode()
-
-    all_nodes = self.cfg.GetAllNodesInfo()
-    all_nodes.pop(mn, None)
-
-    run_nodes = [mn]
-    run_nodes.extend(node.name for node in all_nodes.values()
-                     if node.group == self.group_uuid)
-
-    return (run_nodes, run_nodes)
-
-  def Exec(self, feedback_fn):
-    """Rename the node group.
-
-    """
-    group = self.cfg.GetNodeGroup(self.group_uuid)
-
-    if group is None:
-      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
-                               (self.op.group_name, self.group_uuid))
-
-    group.name = self.op.new_name
-    self.cfg.Update(group, feedback_fn)
-
-    return self.op.new_name
-
-
-class LUGroupEvacuate(LogicalUnit):
-  HPATH = "group-evacuate"
-  HTYPE = constants.HTYPE_GROUP
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    # This raises errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-
-    if self.op.target_groups:
-      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
-                                  self.op.target_groups)
-    else:
-      self.req_target_uuids = []
-
-    if self.group_uuid in self.req_target_uuids:
-      raise errors.OpPrereqError("Group to be evacuated (%s) cannot be used"
-                                 " as a target group (targets are %s)" %
-                                 (self.group_uuid,
-                                  utils.CommaJoin(self.req_target_uuids)),
-                                 errors.ECODE_INVAL)
-
-    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
-
-    self.share_locks = _ShareAll()
-    self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
-      locking.LEVEL_NODEGROUP: [],
-      locking.LEVEL_NODE: [],
-      }
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once node and group
-      # locks have been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
-
-    elif level == locking.LEVEL_NODEGROUP:
-      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
-
-      if self.req_target_uuids:
-        lock_groups = set([self.group_uuid] + self.req_target_uuids)
-
-        # Lock all groups used by instances optimistically; this requires going
-        # via the node before it's locked, requiring verification later on
-        lock_groups.update(group_uuid
-                           for instance_name in
-                             self.owned_locks(locking.LEVEL_INSTANCE)
-                           for group_uuid in
-                             self.cfg.GetInstanceNodeGroups(instance_name))
-      else:
-        # No target groups, need to lock all of them
-        lock_groups = locking.ALL_SET
-
-      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
-
-    elif level == locking.LEVEL_NODE:
-      # This will only lock the nodes in the group to be evacuated which
-      # contain actual instances
-      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
-      self._LockInstancesNodes()
-
-      # Lock all nodes in group to be evacuated and target groups
-      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-      assert self.group_uuid in owned_groups
-      member_nodes = [node_name
-                      for group in owned_groups
-                      for node_name in self.cfg.GetNodeGroup(group).members]
-      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
-
-  def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
-
-    assert owned_groups.issuperset(self.req_target_uuids)
-    assert self.group_uuid in owned_groups
-
-    # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
-
-    # Get instance information
-    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
-
-    # Check if node groups for locked instances are still correct
-    _CheckInstancesNodeGroups(self.cfg, self.instances,
-                              owned_groups, owned_nodes, self.group_uuid)
-
-    if self.req_target_uuids:
-      # User requested specific target groups
-      self.target_uuids = self.req_target_uuids
-    else:
-      # All groups except the one to be evacuated are potential targets
-      self.target_uuids = [group_uuid for group_uuid in owned_groups
-                           if group_uuid != self.group_uuid]
-
-      if not self.target_uuids:
-        raise errors.OpPrereqError("There are no possible target groups",
-                                   errors.ECODE_INVAL)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "GROUP_NAME": self.op.group_name,
-      "TARGET_GROUPS": " ".join(self.target_uuids),
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    mn = self.cfg.GetMasterNode()
-
-    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-
-    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
-
-    return (run_nodes, run_nodes)
-
-  def Exec(self, feedback_fn):
-    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
-
-    assert self.group_uuid not in self.target_uuids
-
-    req = iallocator.IAReqGroupChange(instances=instances,
-                                      target_groups=self.target_uuids)
-    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
-
-    ial.Run(self.op.iallocator)
-
-    if not ial.success:
-      raise errors.OpPrereqError("Can't compute group evacuation using"
-                                 " iallocator '%s': %s" %
-                                 (self.op.iallocator, ial.info),
-                                 errors.ECODE_NORES)
-
-    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
-
-    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
-                 len(jobs), self.op.group_name)
-
-    return ResultWithJobs(jobs)
-
-
 class LURestrictedCommand(NoHooksLU):
   """Logical unit for executing restricted commands.
 
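Closing reviewer note on CheckAssignmentForSplitInstances (moved above): it treats the assignment list as atomic and classifies internally mirrored instances into newly split versus still split. A self-contained example with stub node/instance objects (names invented; "drbd" stands in for a template in constants.DTS_INT_MIRROR):

    class _Node(object):
      def __init__(self, group):
        self.group = group

    class _Inst(object):
      disk_template = "drbd"  # assumed member of DTS_INT_MIRROR
      def __init__(self, name, primary, secondaries):
        self.name = name
        self.primary_node = primary
        self.secondary_nodes = secondaries

    node_data = {"n1": _Node("g1"), "n2": _Node("g1"), "n3": _Node("g2")}
    instance_data = {"i1": _Inst("i1", "n1", ["n2"])}
    # Moving n2 into g2 would split i1 across g1 and g2:
    #   CheckAssignmentForSplitInstances([("n2", "g2")], node_data,
    #                                    instance_data) -> (["i1"], [])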