#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with node groups."""

import itertools
import logging

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
  ResultWithJobs
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
  CheckIpolicyVsDiskTemplates

import ganeti.masterd.instance
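
# Note on the logical-unit (LU) lifecycle used throughout this module: the
# master daemon first calls ExpandNames() (and DeclareLocks() once per lock
# level) to compute the locks an opcode needs, then CheckPrereq() once those
# locks are held, and finally Exec() to apply the change; BuildHooksEnv() and
# BuildHooksNodes() only describe the environment and node lists for the
# pre/post hooks. This summary reflects the base classes in
# ganeti.cmdlib.base and is meant as orientation, not authoritative API
# documentation.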


class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def _CheckIpolicy(self):
    """Checks the group's ipolicy for consistency and validity.

    """
    if self.op.ipolicy:
      cluster = self.cfg.GetClusterInfo()
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
      try:
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
      except errors.ConfigurationError, err:
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                                   errors.ECODE_INVAL)
      CheckIpolicyVsDiskTemplates(full_ipolicy,
                                  cluster.enabled_disk_templates)
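
  # A sketch of what happens above, assuming SimpleFillIPolicy() keeps its
  # usual "fill a partial dict from cluster defaults" semantics: a group-level
  # ipolicy only has to specify the keys it overrides, e.g.
  #   op.ipolicy = {"vcpu-ratio": 4.0}   # key name illustrative only
  # and the filled result handed to CheckParameterSyntax() is the cluster
  # ipolicy with that one key replaced.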

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not an existing node group
    already.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
    else:
      self.new_hv_state = None

    if self.op.disk_state:
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
    else:
      self.new_disk_state = None

    if self.op.diskparams:
      for templ in constants.DISK_TEMPLATES:
        if templ in self.op.diskparams:
          utils.ForceDictType(self.op.diskparams[templ],
                              constants.DISK_DT_TYPES)
      self.new_diskparams = self.op.diskparams
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)
    else:
      self.new_diskparams = {}

    self._CheckIpolicy()

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams,
                                  diskparams=self.new_diskparams,
                                  ipolicy=self.op.ipolicy,
                                  hv_state_static=self.new_hv_state,
                                  disk_state_static=self.new_disk_state)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.node_uuids,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
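
      # This is the optimistic-locking pattern used by several LUs in this
      # module: the source groups are read from the configuration *before*
      # the node locks are held, so a concurrent operation may move a node to
      # another group in the meantime. CheckPrereq() therefore recomputes the
      # expected lock set and fails the opcode if it no longer matches what
      # was actually acquired.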

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.node_uuids))

    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were"
                               " acquired, current groups are '%s',"
                               " used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))

    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
                                             for uuid in self.op.node_uuids],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
                         self.cfg.GetInstanceNames(new_splits)))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances:"
                        " %s", fmt_new_splits)

      if previous_splits:
        self.LogWarning("In addition, these already-split instances continue"
                        " to be split across groups: %s",
                        utils.CommaJoin(utils.NiceSort(
                          self.cfg.GetInstanceNames(previous_splits))))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]

    self.cfg.AssignGroupNodes(mods)

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR
    are considered.

    @type changes: list of (node_uuid, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and become split as
      a consequence of this change, and a list of instances that were
      previously split and this change does not fix.

    """
    changed_nodes = dict((uuid, group) for uuid, group in changes
                         if node_data[uuid].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      if len(set(node_data[node_uuid].group
                 for node_uuid in inst.all_nodes)) > 1:
        previously_split_instances.add(inst.uuid)

      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                 for node_uuid in inst.all_nodes)) > 1:
        all_split_instances.add(inst.uuid)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))
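
  # Worked example (illustrative; group names are made up): a DRBD instance
  # whose nodes sit in groups {A, A} is not split. If `changes` moves its
  # primary node to group B, changed_nodes maps that node to B, the projected
  # group set becomes {B, A}, and the instance lands in all_split_instances
  # but not in previously_split_instances, i.e. it is reported as newly
  # split. An instance already spanning {A, B} that stays split ends up in
  # both sets and is reported in the second list instead.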


class GroupQuery(QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    self._cluster = lu.cfg.GetClusterInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for
    # the latter GetAllInstancesInfo() is not enough, as we have to go through
    # instance->node. Hence, we will need to process nodes even if we only
    # need instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.uuid)
          node_to_group[node.uuid] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.uuid)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData(self._cluster,
                                [self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances,
                                query.GQ_DISKPARAMS in self.requested_data)
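
  # Shape of the helper maps above (values illustrative): with two wanted
  # groups g1 and g2, group_to_nodes might be {"g1": [n1, n2], "g2": [n3]}
  # and group_to_instances {"g1": [i1], "g2": []}. Note that instances are
  # attributed to the group of their primary node only.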


class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.gq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.diskparams,
      self.op.alloc_policy,
      self.op.hv_state,
      self.op.disk_state,
      self.op.ipolicy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

    self.share_locks[locking.LEVEL_INSTANCE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once group lock has
      # been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
    """Updates and verifies disk parameters.

    """
    new_params = GetUpdatedParams(old, new)
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
    return new_params
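
  # A minimal sketch of the merge this helper performs, assuming
  # GetUpdatedParams() keeps its usual "overlay new keys onto old" semantics:
  #   old = {"resync-rate": 1024}   # hypothetical DRBD diskparams subdict
  #   new = {"resync-rate": 2048}
  #   _UpdateAndVerifyDiskParams(old, new) -> {"resync-rate": 2048}
  # ForceDictType() then rejects values whose type does not match
  # constants.DISK_DT_TYPES before the merged dict is accepted.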

  def _CheckIpolicy(self, cluster, owned_instance_names):
    """Sanity checks for the ipolicy.

    @type cluster: C{objects.Cluster}
    @param cluster: the cluster's configuration
    @type owned_instance_names: list of string
    @param owned_instance_names: list of instances

    """
    if self.op.ipolicy:
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
                                           self.op.ipolicy,
                                           group_policy=True)

      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
      CheckIpolicyVsDiskTemplates(new_ipolicy,
                                  cluster.enabled_disk_templates)
      instances = self.cfg.GetMultiInstanceInfoByName(owned_instance_names)
      gmi = ganeti.masterd.instance
      violations = \
        ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                               self.group),
                                     new_ipolicy, instances, self.cfg)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
                        " violate it: %s",
                        utils.CommaJoin(violations))

  def CheckPrereq(self):
    """Check prerequisites.

    """
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    cluster = self.cfg.GetClusterInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

    if self.op.diskparams:
      diskparams = self.group.diskparams
      uavdp = self._UpdateAndVerifyDiskParams
      # For each disk template, update and verify the values of its subdict
      new_diskparams = dict((dt,
                             uavdp(diskparams.get(dt, {}),
                                   self.op.diskparams[dt]))
                            for dt in constants.DISK_TEMPLATES
                            if dt in self.op.diskparams)
      # Now that all subdicts of diskparams are ready, merge the actual
      # dict with all updated subdicts
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                                self.group.hv_state_static)

    if self.op.disk_state:
      self.new_disk_state = \
        MergeAndVerifyDiskState(self.op.disk_state,
                                self.group.disk_state_static)

    self._CheckIpolicy(cluster, owned_instance_names)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.diskparams:
      self.group.diskparams = self.new_diskparams
      result.append(("diskparams", str(self.group.diskparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    if self.op.hv_state:
      self.group.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      self.group.disk_state_static = self.new_disk_state

    if self.op.ipolicy:
      self.group.ipolicy = self.new_ipolicy

    self.cfg.Update(self.group, feedback_fn)
    return result


class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.uuid
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
                                 " removed" % self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.uuid for node in all_nodes.values()
                     if node.group == self.group_uuid)

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name


class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires
        # going via the node before it's locked, requiring verification
        # later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(
                               self.cfg.GetInstanceInfoByName(instance_name)
                                 .uuid))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be evacuated and target groups
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_node_uuids = [node_uuid
                           for group in owned_groups
                           for node_uuid in
                             self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    # Get instance information
    self.instances = \
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

    req = iallocator.IAReqGroupChange(instances=inst_names,
                                      target_groups=self.target_uuids)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)
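
  # Flow sketch for the evacuation above: the LU itself does not move any
  # instance. It builds an IAReqGroupChange request, lets the configured
  # iallocator compute a placement, converts the result into per-instance
  # jobs via LoadNodeEvacResult(), and returns them wrapped in ResultWithJobs
  # so the job queue executes them after this opcode finishes.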


class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],

      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
      # starts one instance of this opcode for every group, which means all
      # nodes will be locked for a short amount of time, so it's better to
      # acquire the node allocation lock as well.
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in
               self.cfg.GetInstanceNodeGroups(
                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

  def _VerifyInstanceLvs(self, node_errors, offline_disk_instance_names,
                         missing_disks):
    node_lv_to_inst = MapInstanceLvsToNodes(
      [inst for inst in self.instances.values() if inst.disks_active])
    if node_lv_to_inst:
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                                  set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(node_uuids, [])

      for (node_uuid, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s",
                          self.cfg.GetNodeName(node_uuid), msg)
          node_errors[node_uuid] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = node_lv_to_inst.pop((node_uuid, lv_name), None)
          if not lv_online and inst is not None:
            offline_disk_instance_names.add(inst.name)

      # Any leftover items in node_lv_to_inst are missing LVs; let's arrange
      # the data better
      for key, inst in node_lv_to_inst.iteritems():
        missing_disks.setdefault(inst.name, []).append(list(key))
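
  # The key structure here: MapInstanceLvsToNodes() yields a
  # (node_uuid, lv_name) -> instance map for instances with active disks.
  # Every LV a node reports is popped from the map; whatever remains
  # afterwards was expected on some node but never reported, so it is
  # recorded as a missing disk for the owning instance.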

  def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
    node_to_inst = {}
    for inst in self.instances.values():
      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
        continue

      for node_uuid in itertools.chain([inst.primary_node],
                                       inst.secondary_nodes):
        node_to_inst.setdefault(node_uuid, []).append(inst)

    nodes_ip = dict((uuid, node.secondary_ip) for (uuid, node)
                    in self.cfg.GetMultiNodeInfo(node_to_inst.keys()))
    for (node_uuid, insts) in node_to_inst.items():
      node_disks = [(inst.disks, inst) for inst in insts]
      node_res = self.rpc.call_drbd_needs_activation(node_uuid, nodes_ip,
                                                     node_disks)
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error getting DRBD status on node %s: %s",
                        self.cfg.GetNodeName(node_uuid), msg)
        node_errors[node_uuid] = msg
        continue

      faulty_disk_uuids = set(node_res.payload)
      for inst in self.instances.values():
        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
        if inst_disk_uuids.intersection(faulty_disk_uuids):
          offline_disk_instance_names.add(inst.name)
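
  # Reading the RPC result above: the call_drbd_needs_activation payload is
  # treated as an iterable of disk UUIDs needing (re)activation on that node.
  # Any instance owning at least one such disk is flagged, so the caller can
  # suggest running activate-disks for it.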

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
      which need activate-disks, dict of instance: (node, volume) for
      missing volumes)

    """
    node_errors = {}
    offline_disk_instance_names = set()
    missing_disks = {}

    self._VerifyInstanceLvs(node_errors, offline_disk_instance_names,
                            missing_disks)
    self._VerifyDrbdStates(node_errors, offline_disk_instance_names)

    return (node_errors, list(offline_disk_instance_names), missing_disks)