Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / group.py @ 1c3231aa

History | View | Annotate | Download (31.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with node groups."""
23

    
24
import logging
25

    
26
from ganeti import constants
27
from ganeti import errors
28
from ganeti import locking
29
from ganeti import objects
30
from ganeti import qlang
31
from ganeti import query
32
from ganeti import utils
33
from ganeti.masterd import iallocator
34
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
35
  ResultWithJobs
36
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
37
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
38
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
39
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
40
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceDisksToNodes
41

    
42
import ganeti.masterd.instance
43

    
44

    
45
class LUGroupAdd(LogicalUnit):
46
  """Logical unit for creating node groups.
47

48
  """
49
  HPATH = "group-add"
50
  HTYPE = constants.HTYPE_GROUP
51
  REQ_BGL = False
52

    
53
  def ExpandNames(self):
54
    # We need the new group's UUID here so that we can create and acquire the
55
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
56
    # that it should not check whether the UUID exists in the configuration.
57
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
58
    self.needed_locks = {}
59
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
60

    
61
  def CheckPrereq(self):
62
    """Check prerequisites.
63

64
    This checks that the given group name is not an existing node group
65
    already.
66

67
    """
68
    try:
69
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
70
    except errors.OpPrereqError:
71
      pass
72
    else:
73
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
74
                                 " node group (UUID: %s)" %
75
                                 (self.op.group_name, existing_uuid),
76
                                 errors.ECODE_EXISTS)
77

    
78
    if self.op.ndparams:
79
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
80

    
81
    if self.op.hv_state:
82
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
83
    else:
84
      self.new_hv_state = None
85

    
86
    if self.op.disk_state:
87
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
88
    else:
89
      self.new_disk_state = None
90

    
91
    if self.op.diskparams:
92
      for templ in constants.DISK_TEMPLATES:
93
        if templ in self.op.diskparams:
94
          utils.ForceDictType(self.op.diskparams[templ],
95
                              constants.DISK_DT_TYPES)
96
      self.new_diskparams = self.op.diskparams
97
      try:
98
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
99
      except errors.OpPrereqError, err:
100
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
101
                                   errors.ECODE_INVAL)
102
    else:
103
      self.new_diskparams = {}
104

    
105
    if self.op.ipolicy:
106
      cluster = self.cfg.GetClusterInfo()
107
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
108
      try:
109
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
110
      except errors.ConfigurationError, err:
111
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
112
                                   errors.ECODE_INVAL)
113

    
114
  def BuildHooksEnv(self):
115
    """Build hooks env.
116

117
    """
118
    return {
119
      "GROUP_NAME": self.op.group_name,
120
      }
121

    
122
  def BuildHooksNodes(self):
123
    """Build hooks nodes.
124

125
    """
126
    mn = self.cfg.GetMasterNode()
127
    return ([mn], [mn])
128

    
129
  def Exec(self, feedback_fn):
130
    """Add the node group to the cluster.
131

132
    """
133
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
134
                                  uuid=self.group_uuid,
135
                                  alloc_policy=self.op.alloc_policy,
136
                                  ndparams=self.op.ndparams,
137
                                  diskparams=self.new_diskparams,
138
                                  ipolicy=self.op.ipolicy,
139
                                  hv_state_static=self.new_hv_state,
140
                                  disk_state_static=self.new_disk_state)
141

    
142
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
143
    del self.remove_locks[locking.LEVEL_NODEGROUP]
144

    
145

    
146
class LUGroupAssignNodes(NoHooksLU):
147
  """Logical unit for assigning nodes to groups.
148

149
  """
150
  REQ_BGL = False
151

    
152
  def ExpandNames(self):
153
    # These raise errors.OpPrereqError on their own:
154
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
155
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
156

    
157
    # We want to lock all the affected nodes and groups. We have readily
158
    # available the list of nodes, and the *destination* group. To gather the
159
    # list of "source" groups, we need to fetch node information later on.
160
    self.needed_locks = {
161
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
162
      locking.LEVEL_NODE: self.op.node_uuids,
163
      }
164

    
165
  def DeclareLocks(self, level):
166
    if level == locking.LEVEL_NODEGROUP:
167
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
168

    
169
      # Try to get all affected nodes' groups without having the group or node
170
      # lock yet. Needs verification later in the code flow.
171
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)
172

    
173
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
174

    
175
  def CheckPrereq(self):
176
    """Check prerequisites.
177

178
    """
179
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
180
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
181
            frozenset(self.op.node_uuids))
182

    
183
    expected_locks = (set([self.group_uuid]) |
184
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
185
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
186
    if actual_locks != expected_locks:
187
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
188
                               " current groups are '%s', used to be '%s'" %
189
                               (utils.CommaJoin(expected_locks),
190
                                utils.CommaJoin(actual_locks)))
191

    
192
    self.node_data = self.cfg.GetAllNodesInfo()
193
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
194
    instance_data = self.cfg.GetAllInstancesInfo()
195

    
196
    if self.group is None:
197
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
198
                               (self.op.group_name, self.group_uuid))
199

    
200
    (new_splits, previous_splits) = \
201
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
202
                                             for uuid in self.op.node_uuids],
203
                                            self.node_data, instance_data)
204

    
205
    if new_splits:
206
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
207

    
208
      if not self.op.force:
209
        raise errors.OpExecError("The following instances get split by this"
210
                                 " change and --force was not given: %s" %
211
                                 fmt_new_splits)
212
      else:
213
        self.LogWarning("This operation will split the following instances: %s",
214
                        fmt_new_splits)
215

    
216
        if previous_splits:
217
          self.LogWarning("In addition, these already-split instances continue"
218
                          " to be split across groups: %s",
219
                          utils.CommaJoin(utils.NiceSort(previous_splits)))
220

    
221
  def Exec(self, feedback_fn):
222
    """Assign nodes to a new group.
223

224
    """
225
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]
226

    
227
    self.cfg.AssignGroupNodes(mods)
228

    
229
  @staticmethod
230
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
231
    """Check for split instances after a node assignment.
232

233
    This method considers a series of node assignments as an atomic operation,
234
    and returns information about split instances after applying the set of
235
    changes.
236

237
    In particular, it returns information about newly split instances, and
238
    instances that were already split, and remain so after the change.
239

240
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
241
    considered.
242

243
    @type changes: list of (node_uuid, new_group_uuid) pairs.
244
    @param changes: list of node assignments to consider.
245
    @param node_data: a dict with data for all nodes
246
    @param instance_data: a dict with all instances to consider
247
    @rtype: a two-tuple
248
    @return: a list of instances that were previously okay and result split as a
249
      consequence of this change, and a list of instances that were previously
250
      split and this change does not fix.
251

252
    """
253
    changed_nodes = dict((uuid, group) for uuid, group in changes
254
                         if node_data[uuid].group != group)
255

    
256
    all_split_instances = set()
257
    previously_split_instances = set()
258

    
259
    for inst in instance_data.values():
260
      if inst.disk_template not in constants.DTS_INT_MIRROR:
261
        continue
262

    
263
      if len(set(node_data[node_uuid].group
264
                 for node_uuid in inst.all_nodes)) > 1:
265
        previously_split_instances.add(inst.name)
266

    
267
      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
268
                 for node_uuid in inst.all_nodes)) > 1:
269
        all_split_instances.add(inst.name)
270

    
271
    return (list(all_split_instances - previously_split_instances),
272
            list(previously_split_instances & all_split_instances))
273

    
274

    
275
class GroupQuery(QueryBase):
276
  FIELDS = query.GROUP_FIELDS
277

    
278
  def ExpandNames(self, lu):
279
    lu.needed_locks = {}
280

    
281
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
282
    self._cluster = lu.cfg.GetClusterInfo()
283
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
284

    
285
    if not self.names:
286
      self.wanted = [name_to_uuid[name]
287
                     for name in utils.NiceSort(name_to_uuid.keys())]
288
    else:
289
      # Accept names to be either names or UUIDs.
290
      missing = []
291
      self.wanted = []
292
      all_uuid = frozenset(self._all_groups.keys())
293

    
294
      for name in self.names:
295
        if name in all_uuid:
296
          self.wanted.append(name)
297
        elif name in name_to_uuid:
298
          self.wanted.append(name_to_uuid[name])
299
        else:
300
          missing.append(name)
301

    
302
      if missing:
303
        raise errors.OpPrereqError("Some groups do not exist: %s" %
304
                                   utils.CommaJoin(missing),
305
                                   errors.ECODE_NOENT)
306

    
307
  def DeclareLocks(self, lu, level):
308
    pass
309

    
310
  def _GetQueryData(self, lu):
311
    """Computes the list of node groups and their attributes.
312

313
    """
314
    do_nodes = query.GQ_NODE in self.requested_data
315
    do_instances = query.GQ_INST in self.requested_data
316

    
317
    group_to_nodes = None
318
    group_to_instances = None
319

    
320
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
321
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
322
    # latter GetAllInstancesInfo() is not enough, for we have to go through
323
    # instance->node. Hence, we will need to process nodes even if we only need
324
    # instance information.
325
    if do_nodes or do_instances:
326
      all_nodes = lu.cfg.GetAllNodesInfo()
327
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
328
      node_to_group = {}
329

    
330
      for node in all_nodes.values():
331
        if node.group in group_to_nodes:
332
          group_to_nodes[node.group].append(node.uuid)
333
          node_to_group[node.uuid] = node.group
334

    
335
      if do_instances:
336
        all_instances = lu.cfg.GetAllInstancesInfo()
337
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
338

    
339
        for instance in all_instances.values():
340
          node = instance.primary_node
341
          if node in node_to_group:
342
            group_to_instances[node_to_group[node]].append(instance.name)
343

    
344
        if not do_nodes:
345
          # Do not pass on node information if it was not requested.
346
          group_to_nodes = None
347

    
348
    return query.GroupQueryData(self._cluster,
349
                                [self._all_groups[uuid]
350
                                 for uuid in self.wanted],
351
                                group_to_nodes, group_to_instances,
352
                                query.GQ_DISKPARAMS in self.requested_data)
353

    
354

    
355
class LUGroupQuery(NoHooksLU):
356
  """Logical unit for querying node groups.
357

358
  """
359
  REQ_BGL = False
360

    
361
  def CheckArguments(self):
362
    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
363
                          self.op.output_fields, False)
364

    
365
  def ExpandNames(self):
366
    self.gq.ExpandNames(self)
367

    
368
  def DeclareLocks(self, level):
369
    self.gq.DeclareLocks(self, level)
370

    
371
  def Exec(self, feedback_fn):
372
    return self.gq.OldStyleQuery(self)
373

    
374

    
375
class LUGroupSetParams(LogicalUnit):
376
  """Modifies the parameters of a node group.
377

378
  """
379
  HPATH = "group-modify"
380
  HTYPE = constants.HTYPE_GROUP
381
  REQ_BGL = False
382

    
383
  def CheckArguments(self):
384
    all_changes = [
385
      self.op.ndparams,
386
      self.op.diskparams,
387
      self.op.alloc_policy,
388
      self.op.hv_state,
389
      self.op.disk_state,
390
      self.op.ipolicy,
391
      ]
392

    
393
    if all_changes.count(None) == len(all_changes):
394
      raise errors.OpPrereqError("Please pass at least one modification",
395
                                 errors.ECODE_INVAL)
396

    
397
  def ExpandNames(self):
398
    # This raises errors.OpPrereqError on its own:
399
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
400

    
401
    self.needed_locks = {
402
      locking.LEVEL_INSTANCE: [],
403
      locking.LEVEL_NODEGROUP: [self.group_uuid],
404
      }
405

    
406
    self.share_locks[locking.LEVEL_INSTANCE] = 1
407

    
408
  def DeclareLocks(self, level):
409
    if level == locking.LEVEL_INSTANCE:
410
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
411

    
412
      # Lock instances optimistically, needs verification once group lock has
413
      # been acquired
414
      self.needed_locks[locking.LEVEL_INSTANCE] = \
415
          self.cfg.GetNodeGroupInstances(self.group_uuid)
416

    
417
  @staticmethod
418
  def _UpdateAndVerifyDiskParams(old, new):
419
    """Updates and verifies disk parameters.
420

421
    """
422
    new_params = GetUpdatedParams(old, new)
423
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
424
    return new_params
425

    
426
  def CheckPrereq(self):
427
    """Check prerequisites.
428

429
    """
430
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
431

    
432
    # Check if locked instances are still correct
433
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
434

    
435
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
436
    cluster = self.cfg.GetClusterInfo()
437

    
438
    if self.group is None:
439
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
440
                               (self.op.group_name, self.group_uuid))
441

    
442
    if self.op.ndparams:
443
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
444
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
445
      self.new_ndparams = new_ndparams
446

    
447
    if self.op.diskparams:
448
      diskparams = self.group.diskparams
449
      uavdp = self._UpdateAndVerifyDiskParams
450
      # For each disktemplate subdict update and verify the values
451
      new_diskparams = dict((dt,
452
                             uavdp(diskparams.get(dt, {}),
453
                                   self.op.diskparams[dt]))
454
                            for dt in constants.DISK_TEMPLATES
455
                            if dt in self.op.diskparams)
456
      # As we've all subdicts of diskparams ready, lets merge the actual
457
      # dict with all updated subdicts
458
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
459
      try:
460
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
461
      except errors.OpPrereqError, err:
462
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
463
                                   errors.ECODE_INVAL)
464

    
465
    if self.op.hv_state:
466
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
467
                                                self.group.hv_state_static)
468

    
469
    if self.op.disk_state:
470
      self.new_disk_state = \
471
        MergeAndVerifyDiskState(self.op.disk_state,
472
                                self.group.disk_state_static)
473

    
474
    if self.op.ipolicy:
475
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
476
                                           self.op.ipolicy,
477
                                           group_policy=True)
478

    
479
      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
480
      inst_filter = lambda inst: inst.name in owned_instances
481
      instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
482
      gmi = ganeti.masterd.instance
483
      violations = \
484
          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
485
                                                                 self.group),
486
                                       new_ipolicy, instances, self.cfg)
487

    
488
      if violations:
489
        self.LogWarning("After the ipolicy change the following instances"
490
                        " violate them: %s",
491
                        utils.CommaJoin(violations))
492

    
493
  def BuildHooksEnv(self):
494
    """Build hooks env.
495

496
    """
497
    return {
498
      "GROUP_NAME": self.op.group_name,
499
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
500
      }
501

    
502
  def BuildHooksNodes(self):
503
    """Build hooks nodes.
504

505
    """
506
    mn = self.cfg.GetMasterNode()
507
    return ([mn], [mn])
508

    
509
  def Exec(self, feedback_fn):
510
    """Modifies the node group.
511

512
    """
513
    result = []
514

    
515
    if self.op.ndparams:
516
      self.group.ndparams = self.new_ndparams
517
      result.append(("ndparams", str(self.group.ndparams)))
518

    
519
    if self.op.diskparams:
520
      self.group.diskparams = self.new_diskparams
521
      result.append(("diskparams", str(self.group.diskparams)))
522

    
523
    if self.op.alloc_policy:
524
      self.group.alloc_policy = self.op.alloc_policy
525

    
526
    if self.op.hv_state:
527
      self.group.hv_state_static = self.new_hv_state
528

    
529
    if self.op.disk_state:
530
      self.group.disk_state_static = self.new_disk_state
531

    
532
    if self.op.ipolicy:
533
      self.group.ipolicy = self.new_ipolicy
534

    
535
    self.cfg.Update(self.group, feedback_fn)
536
    return result
537

    
538

    
539
class LUGroupRemove(LogicalUnit):
540
  HPATH = "group-remove"
541
  HTYPE = constants.HTYPE_GROUP
542
  REQ_BGL = False
543

    
544
  def ExpandNames(self):
545
    # This will raises errors.OpPrereqError on its own:
546
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
547
    self.needed_locks = {
548
      locking.LEVEL_NODEGROUP: [self.group_uuid],
549
      }
550

    
551
  def CheckPrereq(self):
552
    """Check prerequisites.
553

554
    This checks that the given group name exists as a node group, that is
555
    empty (i.e., contains no nodes), and that is not the last group of the
556
    cluster.
557

558
    """
559
    # Verify that the group is empty.
560
    group_nodes = [node.uuid
561
                   for node in self.cfg.GetAllNodesInfo().values()
562
                   if node.group == self.group_uuid]
563

    
564
    if group_nodes:
565
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
566
                                 " nodes: %s" %
567
                                 (self.op.group_name,
568
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
569
                                 errors.ECODE_STATE)
570

    
571
    # Verify the cluster would not be left group-less.
572
    if len(self.cfg.GetNodeGroupList()) == 1:
573
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
574
                                 " removed" % self.op.group_name,
575
                                 errors.ECODE_STATE)
576

    
577
  def BuildHooksEnv(self):
578
    """Build hooks env.
579

580
    """
581
    return {
582
      "GROUP_NAME": self.op.group_name,
583
      }
584

    
585
  def BuildHooksNodes(self):
586
    """Build hooks nodes.
587

588
    """
589
    mn = self.cfg.GetMasterNode()
590
    return ([mn], [mn])
591

    
592
  def Exec(self, feedback_fn):
593
    """Remove the node group.
594

595
    """
596
    try:
597
      self.cfg.RemoveNodeGroup(self.group_uuid)
598
    except errors.ConfigurationError:
599
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
600
                               (self.op.group_name, self.group_uuid))
601

    
602
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
603

    
604

    
605
class LUGroupRename(LogicalUnit):
606
  HPATH = "group-rename"
607
  HTYPE = constants.HTYPE_GROUP
608
  REQ_BGL = False
609

    
610
  def ExpandNames(self):
611
    # This raises errors.OpPrereqError on its own:
612
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
613

    
614
    self.needed_locks = {
615
      locking.LEVEL_NODEGROUP: [self.group_uuid],
616
      }
617

    
618
  def CheckPrereq(self):
619
    """Check prerequisites.
620

621
    Ensures requested new name is not yet used.
622

623
    """
624
    try:
625
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
626
    except errors.OpPrereqError:
627
      pass
628
    else:
629
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
630
                                 " node group (UUID: %s)" %
631
                                 (self.op.new_name, new_name_uuid),
632
                                 errors.ECODE_EXISTS)
633

    
634
  def BuildHooksEnv(self):
635
    """Build hooks env.
636

637
    """
638
    return {
639
      "OLD_NAME": self.op.group_name,
640
      "NEW_NAME": self.op.new_name,
641
      }
642

    
643
  def BuildHooksNodes(self):
644
    """Build hooks nodes.
645

646
    """
647
    mn = self.cfg.GetMasterNode()
648

    
649
    all_nodes = self.cfg.GetAllNodesInfo()
650
    all_nodes.pop(mn, None)
651

    
652
    run_nodes = [mn]
653
    run_nodes.extend(node.uuid for node in all_nodes.values()
654
                     if node.group == self.group_uuid)
655

    
656
    return (run_nodes, run_nodes)
657

    
658
  def Exec(self, feedback_fn):
659
    """Rename the node group.
660

661
    """
662
    group = self.cfg.GetNodeGroup(self.group_uuid)
663

    
664
    if group is None:
665
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
666
                               (self.op.group_name, self.group_uuid))
667

    
668
    group.name = self.op.new_name
669
    self.cfg.Update(group, feedback_fn)
670

    
671
    return self.op.new_name
672

    
673

    
674
class LUGroupEvacuate(LogicalUnit):
675
  HPATH = "group-evacuate"
676
  HTYPE = constants.HTYPE_GROUP
677
  REQ_BGL = False
678

    
679
  def ExpandNames(self):
680
    # This raises errors.OpPrereqError on its own:
681
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
682

    
683
    if self.op.target_groups:
684
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
685
                                  self.op.target_groups)
686
    else:
687
      self.req_target_uuids = []
688

    
689
    if self.group_uuid in self.req_target_uuids:
690
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
691
                                 " as a target group (targets are %s)" %
692
                                 (self.group_uuid,
693
                                  utils.CommaJoin(self.req_target_uuids)),
694
                                 errors.ECODE_INVAL)
695

    
696
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
697

    
698
    self.share_locks = ShareAll()
699
    self.needed_locks = {
700
      locking.LEVEL_INSTANCE: [],
701
      locking.LEVEL_NODEGROUP: [],
702
      locking.LEVEL_NODE: [],
703
      }
704

    
705
  def DeclareLocks(self, level):
706
    if level == locking.LEVEL_INSTANCE:
707
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
708

    
709
      # Lock instances optimistically, needs verification once node and group
710
      # locks have been acquired
711
      self.needed_locks[locking.LEVEL_INSTANCE] = \
712
        self.cfg.GetNodeGroupInstances(self.group_uuid)
713

    
714
    elif level == locking.LEVEL_NODEGROUP:
715
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
716

    
717
      if self.req_target_uuids:
718
        lock_groups = set([self.group_uuid] + self.req_target_uuids)
719

    
720
        # Lock all groups used by instances optimistically; this requires going
721
        # via the node before it's locked, requiring verification later on
722
        lock_groups.update(group_uuid
723
                           for instance_name in
724
                             self.owned_locks(locking.LEVEL_INSTANCE)
725
                           for group_uuid in
726
                             self.cfg.GetInstanceNodeGroups(instance_name))
727
      else:
728
        # No target groups, need to lock all of them
729
        lock_groups = locking.ALL_SET
730

    
731
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
732

    
733
    elif level == locking.LEVEL_NODE:
734
      # This will only lock the nodes in the group to be evacuated which
735
      # contain actual instances
736
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
737
      self._LockInstancesNodes()
738

    
739
      # Lock all nodes in group to be evacuated and target groups
740
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
741
      assert self.group_uuid in owned_groups
742
      member_node_uuids = [node_uuid
743
                           for group in owned_groups
744
                           for node_uuid in
745
                             self.cfg.GetNodeGroup(group).members]
746
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
747

    
748
  def CheckPrereq(self):
749
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
750
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
751
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
752

    
753
    assert owned_groups.issuperset(self.req_target_uuids)
754
    assert self.group_uuid in owned_groups
755

    
756
    # Check if locked instances are still correct
757
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
758

    
759
    # Get instance information
760
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
761

    
762
    # Check if node groups for locked instances are still correct
763
    CheckInstancesNodeGroups(self.cfg, self.instances,
764
                             owned_groups, owned_node_uuids, self.group_uuid)
765

    
766
    if self.req_target_uuids:
767
      # User requested specific target groups
768
      self.target_uuids = self.req_target_uuids
769
    else:
770
      # All groups except the one to be evacuated are potential targets
771
      self.target_uuids = [group_uuid for group_uuid in owned_groups
772
                           if group_uuid != self.group_uuid]
773

    
774
      if not self.target_uuids:
775
        raise errors.OpPrereqError("There are no possible target groups",
776
                                   errors.ECODE_INVAL)
777

    
778
  def BuildHooksEnv(self):
779
    """Build hooks env.
780

781
    """
782
    return {
783
      "GROUP_NAME": self.op.group_name,
784
      "TARGET_GROUPS": " ".join(self.target_uuids),
785
      }
786

    
787
  def BuildHooksNodes(self):
788
    """Build hooks nodes.
789

790
    """
791
    mn = self.cfg.GetMasterNode()
792

    
793
    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
794

    
795
    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
796

    
797
    return (run_nodes, run_nodes)
798

    
799
  def Exec(self, feedback_fn):
800
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
801

    
802
    assert self.group_uuid not in self.target_uuids
803

    
804
    req = iallocator.IAReqGroupChange(instances=instances,
805
                                      target_groups=self.target_uuids)
806
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
807

    
808
    ial.Run(self.op.iallocator)
809

    
810
    if not ial.success:
811
      raise errors.OpPrereqError("Can't compute group evacuation using"
812
                                 " iallocator '%s': %s" %
813
                                 (self.op.iallocator, ial.info),
814
                                 errors.ECODE_NORES)
815

    
816
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
817

    
818
    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
819
                 len(jobs), self.op.group_name)
820

    
821
    return ResultWithJobs(jobs)
822

    
823

    
824
class LUGroupVerifyDisks(NoHooksLU):
825
  """Verifies the status of all disks in a node group.
826

827
  """
828
  REQ_BGL = False
829

    
830
  def ExpandNames(self):
831
    # Raises errors.OpPrereqError on its own if group can't be found
832
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
833

    
834
    self.share_locks = ShareAll()
835
    self.needed_locks = {
836
      locking.LEVEL_INSTANCE: [],
837
      locking.LEVEL_NODEGROUP: [],
838
      locking.LEVEL_NODE: [],
839

    
840
      # This opcode is acquires all node locks in a group. LUClusterVerifyDisks
841
      # starts one instance of this opcode for every group, which means all
842
      # nodes will be locked for a short amount of time, so it's better to
843
      # acquire the node allocation lock as well.
844
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
845
      }
846

    
847
  def DeclareLocks(self, level):
848
    if level == locking.LEVEL_INSTANCE:
849
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
850

    
851
      # Lock instances optimistically, needs verification once node and group
852
      # locks have been acquired
853
      self.needed_locks[locking.LEVEL_INSTANCE] = \
854
        self.cfg.GetNodeGroupInstances(self.group_uuid)
855

    
856
    elif level == locking.LEVEL_NODEGROUP:
857
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
858

    
859
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
860
        set([self.group_uuid] +
861
            # Lock all groups used by instances optimistically; this requires
862
            # going via the node before it's locked, requiring verification
863
            # later on
864
            [group_uuid
865
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
866
             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
867

    
868
    elif level == locking.LEVEL_NODE:
869
      # This will only lock the nodes in the group to be verified which contain
870
      # actual instances
871
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
872
      self._LockInstancesNodes()
873

    
874
      # Lock all nodes in group to be verified
875
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
876
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
877
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
878

    
879
  def CheckPrereq(self):
880
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
881
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
882
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
883

    
884
    assert self.group_uuid in owned_groups
885

    
886
    # Check if locked instances are still correct
887
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
888

    
889
    # Get instance information
890
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
891

    
892
    # Check if node groups for locked instances are still correct
893
    CheckInstancesNodeGroups(self.cfg, self.instances,
894
                             owned_groups, owned_node_uuids, self.group_uuid)
895

    
896
  def Exec(self, feedback_fn):
897
    """Verify integrity of cluster disks.
898

899
    @rtype: tuple of three items
900
    @return: a tuple of (dict of node-to-node_error, list of instances
901
        which need activate-disks, dict of instance: (node, volume) for
902
        missing volumes
903

904
    """
905
    res_nodes = {}
906
    res_instances = set()
907
    res_missing = {}
908

    
909
    nv_dict = MapInstanceDisksToNodes(
910
      [inst for inst in self.instances.values() if inst.disks_active])
911

    
912
    if nv_dict:
913
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
914
                                  set(self.cfg.GetVmCapableNodeList()))
915

    
916
      node_lvs = self.rpc.call_lv_list(node_uuids, [])
917

    
918
      for (node_uuid, node_res) in node_lvs.items():
919
        if node_res.offline:
920
          continue
921

    
922
        msg = node_res.fail_msg
923
        if msg:
924
          logging.warning("Error enumerating LVs on node %s: %s",
925
                          self.cfg.GetNodeName(node_uuid), msg)
926
          res_nodes[node_uuid] = msg
927
          continue
928

    
929
        for lv_name, (_, _, lv_online) in node_res.payload.items():
930
          inst = nv_dict.pop((node_uuid, lv_name), None)
931
          if not (lv_online or inst is None):
932
            res_instances.add(inst)
933

    
934
      # any leftover items in nv_dict are missing LVs, let's arrange the data
935
      # better
936
      for key, inst in nv_dict.iteritems():
937
        res_missing.setdefault(inst, []).append(list(key))
938

    
939
    return (res_nodes, list(res_instances), res_missing)