#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with node groups."""

import logging

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
  ResultWithJobs
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceDisksToNodes

import ganeti.masterd.instance


class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not already in use as a node
    group.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
    else:
      self.new_hv_state = None

    if self.op.disk_state:
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
    else:
      self.new_disk_state = None

    if self.op.diskparams:
      for templ in constants.DISK_TEMPLATES:
        if templ in self.op.diskparams:
          utils.ForceDictType(self.op.diskparams[templ],
                              constants.DISK_DT_TYPES)
      self.new_diskparams = self.op.diskparams
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
101
                                   errors.ECODE_INVAL)
    else:
      self.new_diskparams = {}

    if self.op.ipolicy:
      cluster = self.cfg.GetClusterInfo()
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
      try:
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
      except errors.ConfigurationError, err:
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams,
                                  diskparams=self.new_diskparams,
                                  ipolicy=self.op.ipolicy,
                                  hv_state_static=self.new_hv_state,
                                  disk_state_static=self.new_disk_state)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.node_uuids,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.node_uuids))
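
    # The node-group locks were only computed optimistically in DeclareLocks;
    # re-derive the groups the nodes belong to now and fail if they changed in
    # the meantime.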
    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))

    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
                                             for uuid in self.op.node_uuids],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
                         self.cfg.GetInstanceNames(new_splits)))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

        if previous_splits:
          self.LogWarning("In addition, these already-split instances continue"
                          " to be split across groups: %s",
                          utils.CommaJoin(utils.NiceSort(
                            self.cfg.GetInstanceNames(previous_splits))))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]

    self.cfg.AssignGroupNodes(mods)

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
    considered.

    @type changes: list of (node_uuid, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and end up split as
      a consequence of this change, and a list of instances that were
      previously split and this change does not fix.

    """
    changed_nodes = dict((uuid, group) for uuid, group in changes
                         if node_data[uuid].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      if len(set(node_data[node_uuid].group
                 for node_uuid in inst.all_nodes)) > 1:
        previously_split_instances.add(inst.uuid)

      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                 for node_uuid in inst.all_nodes)) > 1:
        all_split_instances.add(inst.uuid)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))


class GroupQuery(QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
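
    # No locks are requested here; the data below is read directly from the
    # configuration.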
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    self._cluster = lu.cfg.GetClusterInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
    # latter GetAllInstancesInfo() is not enough, for we have to go through
    # instance->node. Hence, we will need to process nodes even if we only need
    # instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.uuid)
          node_to_group[node.uuid] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.uuid)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData(self._cluster,
                                [self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances,
                                query.GQ_DISKPARAMS in self.requested_data)


class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.gq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.diskparams,
      self.op.alloc_policy,
      self.op.hv_state,
      self.op.disk_state,
      self.op.ipolicy,
      ]
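
    # all_changes contains only None exactly when no modification was
    # requested.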
    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

    self.share_locks[locking.LEVEL_INSTANCE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once group lock has
      # been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
    """Updates and verifies disk parameters.

    """
    new_params = GetUpdatedParams(old, new)
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
    return new_params

  def CheckPrereq(self):
    """Check prerequisites.

    """
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    cluster = self.cfg.GetClusterInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

    if self.op.diskparams:
      diskparams = self.group.diskparams
      uavdp = self._UpdateAndVerifyDiskParams
      # For each disktemplate subdict update and verify the values
      new_diskparams = dict((dt,
                             uavdp(diskparams.get(dt, {}),
                                   self.op.diskparams[dt]))
                            for dt in constants.DISK_TEMPLATES
                            if dt in self.op.diskparams)
      # Now that we have all subdicts of diskparams ready, let's merge the
      # actual dict with all updated subdicts
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                                self.group.hv_state_static)

    if self.op.disk_state:
      self.new_disk_state = \
        MergeAndVerifyDiskState(self.op.disk_state,
                                self.group.disk_state_static)

    if self.op.ipolicy:
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
                                           self.op.ipolicy,
                                           group_policy=True)
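
      # Fill the new group policy with cluster-wide defaults and warn about
      # (but do not reject) instances that would newly violate it.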
      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
      instances = self.cfg.GetMultiInstanceInfoByName(owned_instance_names)
      gmi = ganeti.masterd.instance
      violations = \
          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                 self.group),
                                       new_ipolicy, instances, self.cfg)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
492
                        " violate them: %s",
493
                        utils.CommaJoin(violations))

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.diskparams:
      self.group.diskparams = self.new_diskparams
      result.append(("diskparams", str(self.group.diskparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    if self.op.hv_state:
      self.group.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      self.group.disk_state_static = self.new_disk_state

    if self.op.ipolicy:
      self.group.ipolicy = self.new_ipolicy

    self.cfg.Update(self.group, feedback_fn)
    return result


class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This will raise errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.uuid
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
                                 " removed" % self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
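
    # Run hooks on the master plus every node in the renamed group; the master
    # is removed from all_nodes first so that it is not listed twice.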
    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.uuid for node in all_nodes.values()
                     if node.group == self.group_uuid)

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name


class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(
                               self.cfg.GetInstanceInfoByName(instance_name)
                                 .uuid))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be evacuated and target groups
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_node_uuids = [node_uuid
                           for group in owned_groups
                           for node_uuid in
                             self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    # Get instance information
    self.instances = \
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids
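
    # Ask the instance allocator for new placements for every instance in the
    # group, restricted to the chosen target groups, and turn its answer into
    # a list of jobs to submit.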
809

    
810
    req = iallocator.IAReqGroupChange(instances=inst_names,
811
                                      target_groups=self.target_uuids)
812
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
813

    
814
    ial.Run(self.op.iallocator)
815

    
816
    if not ial.success:
817
      raise errors.OpPrereqError("Can't compute group evacuation using"
818
                                 " iallocator '%s': %s" %
819
                                 (self.op.iallocator, ial.info),
820
                                 errors.ECODE_NORES)
821

    
822
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
823

    
824
    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
825
                 len(jobs), self.op.group_name)
826

    
827
    return ResultWithJobs(jobs)
828

    
829

    
830
class LUGroupVerifyDisks(NoHooksLU):
831
  """Verifies the status of all disks in a node group.
832

833
  """
834
  REQ_BGL = False
835

    
836
  def ExpandNames(self):
837
    # Raises errors.OpPrereqError on its own if group can't be found
838
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
839

    
840
    self.share_locks = ShareAll()
841
    self.needed_locks = {
842
      locking.LEVEL_INSTANCE: [],
843
      locking.LEVEL_NODEGROUP: [],
844
      locking.LEVEL_NODE: [],
845

    
846
      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
      # starts one instance of this opcode for every group, which means all
      # nodes will be locked for a short amount of time, so it's better to
      # acquire the node allocation lock as well.
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in
               self.cfg.GetInstanceNodeGroups(
                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    res_nodes = {}
    res_instances = set()
    res_missing = {}
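
    # nv_dict maps (node UUID, volume name) pairs to the owning instance, for
    # every instance whose disks are expected to be active.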
    nv_dict = MapInstanceDisksToNodes(
      [inst for inst in self.instances.values() if inst.disks_active])

    if nv_dict:
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                                  set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(node_uuids, [])

      for (node_uuid, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s",
                          self.cfg.GetNodeName(node_uuid), msg)
          res_nodes[node_uuid] = msg
          continue
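
        # An expected LV that shows up but is not online means the owning
        # instance needs its disks (re)activated.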
        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = nv_dict.pop((node_uuid, lv_name), None)
          if not (lv_online or inst is None):
            res_instances.add(inst)

      # any leftover items in nv_dict are missing LVs, let's arrange the data
      # better
      for key, inst in nv_dict.iteritems():
        res_missing.setdefault(inst, []).append(list(key))

    return (res_nodes, list(res_instances), res_missing)