#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with node groups."""

import itertools
import logging

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
  ResultWithJobs
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes

import ganeti.masterd.instance


class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not already in use as a node
    group.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
    else:
      self.new_hv_state = None

    if self.op.disk_state:
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
    else:
      self.new_disk_state = None

    if self.op.diskparams:
      for templ in constants.DISK_TEMPLATES:
        if templ in self.op.diskparams:
          utils.ForceDictType(self.op.diskparams[templ],
                              constants.DISK_DT_TYPES)
      self.new_diskparams = self.op.diskparams
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s"
                                   % err, errors.ECODE_INVAL)
    else:
      self.new_diskparams = {}

    if self.op.ipolicy:
      cluster = self.cfg.GetClusterInfo()
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
      try:
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
      except errors.ConfigurationError, err:
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams,
                                  diskparams=self.new_diskparams,
                                  ipolicy=self.op.ipolicy,
                                  hv_state_static=self.new_hv_state,
                                  disk_state_static=self.new_disk_state)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.node_uuids,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.node_uuids))

    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))

    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
                                             for uuid in self.op.node_uuids],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
                         self.cfg.GetInstanceNames(new_splits)))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

        if previous_splits:
          self.LogWarning("In addition, these already-split instances continue"
                          " to be split across groups: %s",
                          utils.CommaJoin(utils.NiceSort(
                            self.cfg.GetInstanceNames(previous_splits))))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]

    self.cfg.AssignGroupNodes(mods)

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
    considered.

    @type changes: list of (node_uuid, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and end up split
      as a consequence of this change, and a list of instances that were
      previously split and this change does not fix.

    """
    changed_nodes = dict((uuid, group) for uuid, group in changes
                         if node_data[uuid].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      if len(set(node_data[node_uuid].group
                 for node_uuid in inst.all_nodes)) > 1:
        previously_split_instances.add(inst.uuid)

      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                 for node_uuid in inst.all_nodes)) > 1:
        all_split_instances.add(inst.uuid)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))
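
  # Illustrative note (editor's sketch, not part of the upstream module): for
  # a hypothetical DRBD instance on nodes "n1" (in group "g1") and "n2" (in
  # group "g2"), changes=[("n2", "g1")] gives changed_nodes={"n2": "g1"}; the
  # instance lands in previously_split_instances (two groups before the
  # change) but not in all_split_instances (one group afterwards), so it shows
  # up in neither returned list, i.e. the assignment heals the split.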


class GroupQuery(QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    self._cluster = lu.cfg.GetClusterInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
    # latter GetAllInstancesInfo() is not enough, for we have to go through
    # instance->node. Hence, we will need to process nodes even if we only need
    # instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.uuid)
          node_to_group[node.uuid] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.uuid)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData(self._cluster,
                                [self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances,
                                query.GQ_DISKPARAMS in self.requested_data)


class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                          self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.gq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.diskparams,
      self.op.alloc_policy,
      self.op.hv_state,
      self.op.disk_state,
      self.op.ipolicy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

    self.share_locks[locking.LEVEL_INSTANCE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once group lock has
      # been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
    """Updates and verifies disk parameters.

    """
    new_params = GetUpdatedParams(old, new)
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
    return new_params
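
  # Illustrative note (editor's sketch, not part of the upstream module,
  # assuming GetUpdatedParams simply overlays the new values onto the old
  # dict): merging a hypothetical {"resync-rate": 1000} into an existing
  # {"resync-rate": 500, "metavg": "xenvg"} would yield
  # {"resync-rate": 1000, "metavg": "xenvg"}, which ForceDictType then checks
  # against constants.DISK_DT_TYPES before it is returned.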

  def CheckPrereq(self):
    """Check prerequisites.

    """
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    cluster = self.cfg.GetClusterInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

    if self.op.diskparams:
      diskparams = self.group.diskparams
      uavdp = self._UpdateAndVerifyDiskParams
      # For each disk template subdict, update and verify the values
      new_diskparams = dict((dt,
                             uavdp(diskparams.get(dt, {}),
                                   self.op.diskparams[dt]))
                            for dt in constants.DISK_TEMPLATES
                            if dt in self.op.diskparams)
      # Now that we have all subdicts of diskparams ready, merge the actual
      # dict with all the updated subdicts
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s"
                                   % err, errors.ECODE_INVAL)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                                self.group.hv_state_static)

    if self.op.disk_state:
      self.new_disk_state = \
        MergeAndVerifyDiskState(self.op.disk_state,
                                self.group.disk_state_static)

    if self.op.ipolicy:
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
                                           self.op.ipolicy,
                                           group_policy=True)

      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
      instances = self.cfg.GetMultiInstanceInfoByName(owned_instance_names)
      gmi = ganeti.masterd.instance
      violations = \
          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                 self.group),
                                       new_ipolicy, instances, self.cfg)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
                        " violate it: %s",
                        utils.CommaJoin(violations))

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.diskparams:
      self.group.diskparams = self.new_diskparams
      result.append(("diskparams", str(self.group.diskparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    if self.op.hv_state:
      self.group.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      self.group.disk_state_static = self.new_disk_state

    if self.op.ipolicy:
      self.group.ipolicy = self.new_ipolicy

    self.cfg.Update(self.group, feedback_fn)
    return result


class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.uuid
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
                                 " removed" % self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.uuid for node in all_nodes.values()
                     if node.group == self.group_uuid)

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name


class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(
                               self.cfg.GetInstanceInfoByName(instance_name)
                                 .uuid))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be evacuated and target groups
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_node_uuids = [node_uuid
                           for group in owned_groups
                           for node_uuid in
                             self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    # Get instance information
    self.instances = \
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

    req = iallocator.IAReqGroupChange(instances=inst_names,
                                      target_groups=self.target_uuids)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)


class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],

      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
      # starts one instance of this opcode for every group, which means all
      # nodes will be locked for a short amount of time, so it's better to
      # acquire the node allocation lock as well.
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in
               self.cfg.GetInstanceNodeGroups(
                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

  def _VerifyInstanceLvs(self, node_errors, offline_disk_instance_names,
                         missing_disks):
    node_lv_to_inst = MapInstanceLvsToNodes(
      [inst for inst in self.instances.values() if inst.disks_active])
    if node_lv_to_inst:
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                                  set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(node_uuids, [])

      for (node_uuid, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s",
                          self.cfg.GetNodeName(node_uuid), msg)
          node_errors[node_uuid] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = node_lv_to_inst.pop((node_uuid, lv_name), None)
          if not lv_online and inst is not None:
            offline_disk_instance_names.add(inst.name)

      # any leftover items in node_lv_to_inst are missing LVs, let's arrange
      # the data better
      for key, inst in node_lv_to_inst.iteritems():
        missing_disks.setdefault(inst.name, []).append(list(key))

  def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
    node_to_inst = {}
    for inst in self.instances.values():
      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
        continue

      for node_uuid in itertools.chain([inst.primary_node],
                                       inst.secondary_nodes):
        node_to_inst.setdefault(node_uuid, []).append(inst)

    nodes_ip = dict((uuid, node.secondary_ip) for (uuid, node)
                    in self.cfg.GetMultiNodeInfo(node_to_inst.keys()))
    for (node_uuid, insts) in node_to_inst.items():
      node_disks = [(inst.disks, inst) for inst in insts]
      node_res = self.rpc.call_drbd_needs_activation(node_uuid, nodes_ip,
                                                     node_disks)
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error getting DRBD status on node %s: %s",
                        self.cfg.GetNodeName(node_uuid), msg)
        node_errors[node_uuid] = msg
        continue

      faulty_disk_uuids = set(node_res.payload)
      for inst in self.instances.values():
        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
        if inst_disk_uuids.intersection(faulty_disk_uuids):
          offline_disk_instance_names.add(inst.name)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    node_errors = {}
    offline_disk_instance_names = set()
    missing_disks = {}

    self._VerifyInstanceLvs(node_errors, offline_disk_instance_names,
                            missing_disks)
    self._VerifyDrbdStates(node_errors, offline_disk_instance_names)

    return (node_errors, list(offline_disk_instance_names), missing_disks)
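
    # Illustrative note (editor's sketch, not part of the upstream module): a
    # caller such as the cluster-wide verify-disks flow would typically report
    # node_errors and missing_disks to the operator and re-activate disks for
    # the instances returned in the second element, e.g. by submitting
    # OpInstanceActivateDisks jobs for them.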