root / lib / cmdlib / group.py @ 70b634e6

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with node groups."""

import itertools
import logging

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
  ResultWithJobs
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
  CheckDiskAccessModeConsistency

import ganeti.masterd.instance

class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def _CheckIpolicy(self):
    """Checks the group's ipolicy for consistency and validity.

    """
    if self.op.ipolicy:
      cluster = self.cfg.GetClusterInfo()
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
      try:
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
      except errors.ConfigurationError, err:
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                                   errors.ECODE_INVAL)
      CheckIpolicyVsDiskTemplates(full_ipolicy,
                                  cluster.enabled_disk_templates)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not already in use as a node
    group.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
    else:
      self.new_hv_state = None

    if self.op.disk_state:
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
    else:
      self.new_disk_state = None

    if self.op.diskparams:
      for templ in constants.DISK_TEMPLATES:
        if templ in self.op.diskparams:
          utils.ForceDictType(self.op.diskparams[templ],
                              constants.DISK_DT_TYPES)
      self.new_diskparams = self.op.diskparams
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)
    else:
      self.new_diskparams = {}

    self._CheckIpolicy()

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams,
                                  diskparams=self.new_diskparams,
                                  ipolicy=self.op.ipolicy,
                                  hv_state_static=self.new_hv_state,
                                  disk_state_static=self.new_disk_state)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]

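
# Illustrative sketch (not part of Ganeti): CheckPrereq() above validates each
# per-disk-template parameter dict before the group is created. The standalone
# toy version below mirrors that shape check with hypothetical template names
# and parameter types; it does not use the real constants or utils helpers.
def _example_validate_diskparams(diskparams):
  known_templates = frozenset(["drbd", "plain", "file"])
  expected_types = {"resync-rate": int, "stripes": int, "metavg": str}

  for templ, params in diskparams.items():
    if templ not in known_templates:
      raise ValueError("Unknown disk template '%s'" % templ)
    for key, value in params.items():
      if key in expected_types and not isinstance(value, expected_types[key]):
        raise ValueError("Parameter '%s' of template '%s' has the wrong type" %
                         (key, templ))
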
class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.node_uuids,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.node_uuids))

    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))

    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
                                             for uuid in self.op.node_uuids],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
                         self.cfg.GetInstanceNames(new_splits)))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

        if previous_splits:
          self.LogWarning("In addition, these already-split instances continue"
                          " to be split across groups: %s",
                          utils.CommaJoin(utils.NiceSort(
                            self.cfg.GetInstanceNames(previous_splits))))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]

    self.cfg.AssignGroupNodes(mods)

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
    considered.

    @type changes: list of (node_uuid, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and end up split as
      a consequence of this change, and a list of instances that were
      previously split and this change does not fix.

    """
    changed_nodes = dict((uuid, group) for uuid, group in changes
                         if node_data[uuid].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      if len(set(node_data[node_uuid].group
                 for node_uuid in inst.all_nodes)) > 1:
        previously_split_instances.add(inst.uuid)

      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                 for node_uuid in inst.all_nodes)) > 1:
        all_split_instances.add(inst.uuid)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))

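
# Illustrative sketch (not part of Ganeti): the split check above reduces to
# comparing the set of groups spanned by an instance's nodes before and after
# the proposed assignment. The standalone toy version below uses plain dicts
# and hypothetical names instead of Ganeti configuration objects.
def _example_split_check():
  node_group = {"node-1": "group-A", "node-2": "group-A", "node-3": "group-B"}
  changes = {"node-2": "group-B"}     # proposed node reassignment
  inst_nodes = ["node-1", "node-2"]   # nodes used by one mirrored instance

  groups_before = set(node_group[n] for n in inst_nodes)
  groups_after = set(changes.get(n, node_group[n]) for n in inst_nodes)

  newly_split = len(groups_after) > 1 and len(groups_before) == 1
  still_split = len(groups_before) > 1 and len(groups_after) > 1
  return (newly_split, still_split)   # (True, False) for this toy data
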
class GroupQuery(QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    self._cluster = lu.cfg.GetClusterInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
    # latter GetAllInstancesInfo() is not enough, for we have to go through
    # instance->node. Hence, we will need to process nodes even if we only need
    # instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.uuid)
          node_to_group[node.uuid] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.uuid)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData(self._cluster,
                                [self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances,
                                query.GQ_DISKPARAMS in self.requested_data)

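
# Illustrative sketch (not part of Ganeti): _GetQueryData() above derives the
# group->instances mapping by going through each instance's primary node,
# since instances do not reference groups directly. Standalone toy version
# with hypothetical node, group and instance names:
def _example_group_mappings():
  nodes = {"node-1": "group-A", "node-2": "group-B"}      # node -> group
  instances = {"inst-1": "node-1", "inst-2": "node-2"}    # inst -> primary node

  group_to_nodes = {}
  for node_uuid, group_uuid in nodes.items():
    group_to_nodes.setdefault(group_uuid, []).append(node_uuid)

  group_to_instances = {}
  for inst_uuid, node_uuid in instances.items():
    group_to_instances.setdefault(nodes[node_uuid], []).append(inst_uuid)

  # ({"group-A": ["node-1"], ...}, {"group-A": ["inst-1"], ...})
  return (group_to_nodes, group_to_instances)
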
class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.gq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)

class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.diskparams,
      self.op.alloc_policy,
      self.op.hv_state,
      self.op.disk_state,
      self.op.ipolicy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

    if self.op.diskparams:
      CheckDiskAccessModeValidity(self.op.diskparams)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

    self.share_locks[locking.LEVEL_INSTANCE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once group lock has
      # been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
    """Updates and verifies disk parameters.

    """
    new_params = GetUpdatedParams(old, new)
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
    return new_params

  def _CheckIpolicy(self, cluster, owned_instance_names):
    """Sanity checks for the ipolicy.

    @type cluster: C{objects.Cluster}
    @param cluster: the cluster's configuration
    @type owned_instance_names: list of string
    @param owned_instance_names: list of instances

    """
    if self.op.ipolicy:
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
                                           self.op.ipolicy,
                                           group_policy=True)

      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
      CheckIpolicyVsDiskTemplates(new_ipolicy,
                                  cluster.enabled_disk_templates)
      instances = \
        dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
      gmi = ganeti.masterd.instance
      violations = \
          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                 self.group),
                                       new_ipolicy, instances.values(),
                                       self.cfg)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
                        " violate it: %s",
                        utils.CommaJoin(violations))

  def CheckPrereq(self):
    """Check prerequisites.

    """
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    cluster = self.cfg.GetClusterInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

    if self.op.diskparams:
      diskparams = self.group.diskparams
      uavdp = self._UpdateAndVerifyDiskParams
      # For each disktemplate subdict update and verify the values
      new_diskparams = dict((dt,
                             uavdp(diskparams.get(dt, {}),
                                   self.op.diskparams[dt]))
                            for dt in constants.DISK_TEMPLATES
                            if dt in self.op.diskparams)
      # Now that all subdicts of diskparams are updated, merge them back into
      # the actual dict
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)

      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
        CheckDiskAccessModeConsistency(self.new_diskparams, self.cfg,
                                       group=self.group)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                                self.group.hv_state_static)

    if self.op.disk_state:
      self.new_disk_state = \
        MergeAndVerifyDiskState(self.op.disk_state,
                                self.group.disk_state_static)

    self._CheckIpolicy(cluster, owned_instance_names)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.diskparams:
      self.group.diskparams = self.new_diskparams
      result.append(("diskparams", str(self.group.diskparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    if self.op.hv_state:
      self.group.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      self.group.disk_state_static = self.new_disk_state

    if self.op.ipolicy:
      self.group.ipolicy = self.new_ipolicy

    self.cfg.Update(self.group, feedback_fn)
    return result

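
# Illustrative sketch (not part of Ganeti): the diskparams handling in
# LUGroupSetParams.CheckPrereq() above updates each disk-template subdict
# with the requested overrides and then merges the result back over the
# group's existing diskparams. Standalone toy version with made-up keys:
def _example_diskparams_merge():
  current = {"drbd": {"resync-rate": 1024, "metavg": "xenvg"}}
  requested = {"drbd": {"resync-rate": 2048}, "plain": {"stripes": 2}}

  merged = dict((templ, params.copy()) for templ, params in current.items())
  for templ, overrides in requested.items():
    merged.setdefault(templ, {}).update(overrides)

  # {"drbd": {"resync-rate": 2048, "metavg": "xenvg"}, "plain": {"stripes": 2}}
  return merged
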
class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.uuid
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
                                 " removed" % self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.uuid for node in all_nodes.values()
                     if node.group == self.group_uuid)

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name

class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(
                               self.cfg.GetInstanceInfoByName(instance_name)
                                 .uuid))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be evacuated and target groups
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_node_uuids = [node_uuid
                           for group in owned_groups
                           for node_uuid in
                             self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    # Get instance information
    self.instances = \
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

    req = iallocator.IAReqGroupChange(instances=inst_names,
                                      target_groups=self.target_uuids)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)

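
# Illustrative sketch (not part of Ganeti): LUGroupEvacuate above rejects the
# evacuated group as a target and, when no targets were requested, falls back
# to every other locked group. Standalone toy version with hypothetical UUIDs,
# e.g. _example_pick_evac_targets([], ["uuid-a", "uuid-b"], "uuid-a"):
def _example_pick_evac_targets(requested, owned_groups, evacuated_group):
  if evacuated_group in requested:
    raise ValueError("Group to be evacuated cannot also be a target")

  if requested:
    return list(requested)

  targets = [g for g in owned_groups if g != evacuated_group]
  if not targets:
    raise ValueError("There are no possible target groups")
  return targets  # ["uuid-b"] for the example call above
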
class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],

      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
      # starts one instance of this opcode for every group, which means all
      # nodes will be locked for a short amount of time, so it's better to
      # acquire the node allocation lock as well.
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in
               self.cfg.GetInstanceNodeGroups(
                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

  def _VerifyInstanceLvs(self, node_errors, offline_disk_instance_names,
                         missing_disks):
    node_lv_to_inst = MapInstanceLvsToNodes(
      [inst for inst in self.instances.values() if inst.disks_active])
    if node_lv_to_inst:
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                                  set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(node_uuids, [])

      for (node_uuid, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s",
                          self.cfg.GetNodeName(node_uuid), msg)
          node_errors[node_uuid] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = node_lv_to_inst.pop((node_uuid, lv_name), None)
          if not lv_online and inst is not None:
            offline_disk_instance_names.add(inst.name)

      # any leftover items in node_lv_to_inst are missing LVs, let's arrange
      # the data better
      for key, inst in node_lv_to_inst.iteritems():
        missing_disks.setdefault(inst.name, []).append(list(key))

  def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
    node_to_inst = {}
    for inst in self.instances.values():
      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
        continue

      for node_uuid in itertools.chain([inst.primary_node],
                                       inst.secondary_nodes):
        node_to_inst.setdefault(node_uuid, []).append(inst)

    for (node_uuid, insts) in node_to_inst.items():
      node_disks = [(inst.disks, inst) for inst in insts]
      node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error getting DRBD status on node %s: %s",
                        self.cfg.GetNodeName(node_uuid), msg)
        node_errors[node_uuid] = msg
        continue

      faulty_disk_uuids = set(node_res.payload)
      for inst in self.instances.values():
        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
        if inst_disk_uuids.intersection(faulty_disk_uuids):
          offline_disk_instance_names.add(inst.name)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    node_errors = {}
    offline_disk_instance_names = set()
    missing_disks = {}

    self._VerifyInstanceLvs(node_errors, offline_disk_instance_names,
                            missing_disks)
    self._VerifyDrbdStates(node_errors, offline_disk_instance_names)

    return (node_errors, list(offline_disk_instance_names), missing_disks)
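
# Illustrative sketch (not part of Ganeti): _VerifyInstanceLvs() above starts
# from a (node, lv_name) -> instance map, removes every LV that a node actually
# reports, and treats whatever is left over as a missing volume. Standalone toy
# version of that bookkeeping with hypothetical node, LV and instance names:
def _example_missing_lv_check():
  node_lv_to_inst = {
    ("node-1", "xenvg/disk0"): "inst-1",
    ("node-1", "xenvg/disk1"): "inst-2",
    }
  reported_lvs = {"node-1": ["xenvg/disk0"]}   # what the node RPC returned

  for node_uuid, lv_names in reported_lvs.items():
    for lv_name in lv_names:
      node_lv_to_inst.pop((node_uuid, lv_name), None)

  missing = {}
  for (node_uuid, lv_name), inst_name in node_lv_to_inst.items():
    missing.setdefault(inst_name, []).append([node_uuid, lv_name])

  return missing  # {"inst-2": [["node-1", "xenvg/disk1"]]}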