Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / group.py @ 0c3d9c7c

History | View | Annotate | Download (34.3 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with node groups."""
23

    
24
import itertools
25
import logging
26

    
27
from ganeti import constants
28
from ganeti import errors
29
from ganeti import locking
30
from ganeti import objects
31
from ganeti import qlang
32
from ganeti import query
33
from ganeti import utils
34
from ganeti.masterd import iallocator
35
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
36
  ResultWithJobs
37
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
38
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
39
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
40
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
41
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
42
  CheckIpolicyVsDiskTemplates
43

    
44
import ganeti.masterd.instance
45

    
46

    
47
class LUGroupAdd(LogicalUnit):
48
  """Logical unit for creating node groups.
49

50
  """
51
  HPATH = "group-add"
52
  HTYPE = constants.HTYPE_GROUP
53
  REQ_BGL = False
54

    
55
  def ExpandNames(self):
56
    # We need the new group's UUID here so that we can create and acquire the
57
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
58
    # that it should not check whether the UUID exists in the configuration.
59
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
60
    self.needed_locks = {}
61
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
62

    
63
  def _CheckIpolicy(self):
64
    """Checks the group's ipolicy for consistency and validity.
65

66
    """
67
    if self.op.ipolicy:
68
      cluster = self.cfg.GetClusterInfo()
69
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
70
      try:
71
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
72
      except errors.ConfigurationError, err:
73
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
74
                                   errors.ECODE_INVAL)
75
      CheckIpolicyVsDiskTemplates(full_ipolicy,
76
                                  cluster.enabled_disk_templates)
77

    
78
  def CheckPrereq(self):
79
    """Check prerequisites.
80

81
    This checks that the given group name is not an existing node group
82
    already.
83

84
    """
85
    try:
86
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
87
    except errors.OpPrereqError:
88
      pass
89
    else:
90
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
91
                                 " node group (UUID: %s)" %
92
                                 (self.op.group_name, existing_uuid),
93
                                 errors.ECODE_EXISTS)
94

    
95
    if self.op.ndparams:
96
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
97

    
98
    if self.op.hv_state:
99
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
100
    else:
101
      self.new_hv_state = None
102

    
103
    if self.op.disk_state:
104
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
105
    else:
106
      self.new_disk_state = None
107

    
108
    if self.op.diskparams:
109
      for templ in constants.DISK_TEMPLATES:
110
        if templ in self.op.diskparams:
111
          utils.ForceDictType(self.op.diskparams[templ],
112
                              constants.DISK_DT_TYPES)
113
      self.new_diskparams = self.op.diskparams
114
      try:
115
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
116
      except errors.OpPrereqError, err:
117
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
118
                                   errors.ECODE_INVAL)
119
    else:
120
      self.new_diskparams = {}
121

    
122
    self._CheckIpolicy()
123

    
124
  def BuildHooksEnv(self):
125
    """Build hooks env.
126

127
    """
128
    return {
129
      "GROUP_NAME": self.op.group_name,
130
      }
131

    
132
  def BuildHooksNodes(self):
133
    """Build hooks nodes.
134

135
    """
136
    mn = self.cfg.GetMasterNode()
137
    return ([mn], [mn])
138

    
139
  def Exec(self, feedback_fn):
140
    """Add the node group to the cluster.
141

142
    """
143
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
144
                                  uuid=self.group_uuid,
145
                                  alloc_policy=self.op.alloc_policy,
146
                                  ndparams=self.op.ndparams,
147
                                  diskparams=self.new_diskparams,
148
                                  ipolicy=self.op.ipolicy,
149
                                  hv_state_static=self.new_hv_state,
150
                                  disk_state_static=self.new_disk_state)
151

    
152
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
153
    del self.remove_locks[locking.LEVEL_NODEGROUP]
154

    
155

    
156
class LUGroupAssignNodes(NoHooksLU):
157
  """Logical unit for assigning nodes to groups.
158

159
  """
160
  REQ_BGL = False
161

    
162
  def ExpandNames(self):
163
    # These raise errors.OpPrereqError on their own:
164
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
165
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
166

    
167
    # We want to lock all the affected nodes and groups. We have readily
168
    # available the list of nodes, and the *destination* group. To gather the
169
    # list of "source" groups, we need to fetch node information later on.
170
    self.needed_locks = {
171
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
172
      locking.LEVEL_NODE: self.op.node_uuids,
173
      }
174

    
175
  def DeclareLocks(self, level):
176
    if level == locking.LEVEL_NODEGROUP:
177
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
178

    
179
      # Try to get all affected nodes' groups without having the group or node
180
      # lock yet. Needs verification later in the code flow.
181
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)
182

    
183
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
184

    
185
  def CheckPrereq(self):
186
    """Check prerequisites.
187

188
    """
189
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
190
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
191
            frozenset(self.op.node_uuids))
192

    
193
    expected_locks = (set([self.group_uuid]) |
194
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
195
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
196
    if actual_locks != expected_locks:
197
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
198
                               " current groups are '%s', used to be '%s'" %
199
                               (utils.CommaJoin(expected_locks),
200
                                utils.CommaJoin(actual_locks)))
201

    
202
    self.node_data = self.cfg.GetAllNodesInfo()
203
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
204
    instance_data = self.cfg.GetAllInstancesInfo()
205

    
206
    if self.group is None:
207
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
208
                               (self.op.group_name, self.group_uuid))
209

    
210
    (new_splits, previous_splits) = \
211
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
212
                                             for uuid in self.op.node_uuids],
213
                                            self.node_data, instance_data)
214

    
215
    if new_splits:
216
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
217
                         self.cfg.GetInstanceNames(new_splits)))
218

    
219
      if not self.op.force:
220
        raise errors.OpExecError("The following instances get split by this"
221
                                 " change and --force was not given: %s" %
222
                                 fmt_new_splits)
223
      else:
224
        self.LogWarning("This operation will split the following instances: %s",
225
                        fmt_new_splits)
226

    
227
        if previous_splits:
228
          self.LogWarning("In addition, these already-split instances continue"
229
                          " to be split across groups: %s",
230
                          utils.CommaJoin(utils.NiceSort(
231
                            self.cfg.GetInstanceNames(previous_splits))))
232

    
233
  def Exec(self, feedback_fn):
234
    """Assign nodes to a new group.
235

236
    """
237
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]
238

    
239
    self.cfg.AssignGroupNodes(mods)
240

    
241
  @staticmethod
242
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
243
    """Check for split instances after a node assignment.
244

245
    This method considers a series of node assignments as an atomic operation,
246
    and returns information about split instances after applying the set of
247
    changes.
248

249
    In particular, it returns information about newly split instances, and
250
    instances that were already split, and remain so after the change.
251

252
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
253
    considered.
254

255
    @type changes: list of (node_uuid, new_group_uuid) pairs.
256
    @param changes: list of node assignments to consider.
257
    @param node_data: a dict with data for all nodes
258
    @param instance_data: a dict with all instances to consider
259
    @rtype: a two-tuple
260
    @return: a list of instances that were previously okay and result split as a
261
      consequence of this change, and a list of instances that were previously
262
      split and this change does not fix.
263

264
    """
265
    changed_nodes = dict((uuid, group) for uuid, group in changes
266
                         if node_data[uuid].group != group)
267

    
268
    all_split_instances = set()
269
    previously_split_instances = set()
270

    
271
    for inst in instance_data.values():
272
      if inst.disk_template not in constants.DTS_INT_MIRROR:
273
        continue
274

    
275
      if len(set(node_data[node_uuid].group
276
                 for node_uuid in inst.all_nodes)) > 1:
277
        previously_split_instances.add(inst.uuid)
278

    
279
      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
280
                 for node_uuid in inst.all_nodes)) > 1:
281
        all_split_instances.add(inst.uuid)
282

    
283
    return (list(all_split_instances - previously_split_instances),
284
            list(previously_split_instances & all_split_instances))
285

    
286

    
287
class GroupQuery(QueryBase):
288
  FIELDS = query.GROUP_FIELDS
289

    
290
  def ExpandNames(self, lu):
291
    lu.needed_locks = {}
292

    
293
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
294
    self._cluster = lu.cfg.GetClusterInfo()
295
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
296

    
297
    if not self.names:
298
      self.wanted = [name_to_uuid[name]
299
                     for name in utils.NiceSort(name_to_uuid.keys())]
300
    else:
301
      # Accept names to be either names or UUIDs.
302
      missing = []
303
      self.wanted = []
304
      all_uuid = frozenset(self._all_groups.keys())
305

    
306
      for name in self.names:
307
        if name in all_uuid:
308
          self.wanted.append(name)
309
        elif name in name_to_uuid:
310
          self.wanted.append(name_to_uuid[name])
311
        else:
312
          missing.append(name)
313

    
314
      if missing:
315
        raise errors.OpPrereqError("Some groups do not exist: %s" %
316
                                   utils.CommaJoin(missing),
317
                                   errors.ECODE_NOENT)
318

    
319
  def DeclareLocks(self, lu, level):
320
    pass
321

    
322
  def _GetQueryData(self, lu):
323
    """Computes the list of node groups and their attributes.
324

325
    """
326
    do_nodes = query.GQ_NODE in self.requested_data
327
    do_instances = query.GQ_INST in self.requested_data
328

    
329
    group_to_nodes = None
330
    group_to_instances = None
331

    
332
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
333
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
334
    # latter GetAllInstancesInfo() is not enough, for we have to go through
335
    # instance->node. Hence, we will need to process nodes even if we only need
336
    # instance information.
337
    if do_nodes or do_instances:
338
      all_nodes = lu.cfg.GetAllNodesInfo()
339
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
340
      node_to_group = {}
341

    
342
      for node in all_nodes.values():
343
        if node.group in group_to_nodes:
344
          group_to_nodes[node.group].append(node.uuid)
345
          node_to_group[node.uuid] = node.group
346

    
347
      if do_instances:
348
        all_instances = lu.cfg.GetAllInstancesInfo()
349
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
350

    
351
        for instance in all_instances.values():
352
          node = instance.primary_node
353
          if node in node_to_group:
354
            group_to_instances[node_to_group[node]].append(instance.uuid)
355

    
356
        if not do_nodes:
357
          # Do not pass on node information if it was not requested.
358
          group_to_nodes = None
359

    
360
    return query.GroupQueryData(self._cluster,
361
                                [self._all_groups[uuid]
362
                                 for uuid in self.wanted],
363
                                group_to_nodes, group_to_instances,
364
                                query.GQ_DISKPARAMS in self.requested_data)
365

    
366

    
367
class LUGroupQuery(NoHooksLU):
368
  """Logical unit for querying node groups.
369

370
  """
371
  REQ_BGL = False
372

    
373
  def CheckArguments(self):
374
    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
375
                          self.op.output_fields, False)
376

    
377
  def ExpandNames(self):
378
    self.gq.ExpandNames(self)
379

    
380
  def DeclareLocks(self, level):
381
    self.gq.DeclareLocks(self, level)
382

    
383
  def Exec(self, feedback_fn):
384
    return self.gq.OldStyleQuery(self)
385

    
386

    
387
class LUGroupSetParams(LogicalUnit):
388
  """Modifies the parameters of a node group.
389

390
  """
391
  HPATH = "group-modify"
392
  HTYPE = constants.HTYPE_GROUP
393
  REQ_BGL = False
394

    
395
  def CheckArguments(self):
396
    all_changes = [
397
      self.op.ndparams,
398
      self.op.diskparams,
399
      self.op.alloc_policy,
400
      self.op.hv_state,
401
      self.op.disk_state,
402
      self.op.ipolicy,
403
      ]
404

    
405
    if all_changes.count(None) == len(all_changes):
406
      raise errors.OpPrereqError("Please pass at least one modification",
407
                                 errors.ECODE_INVAL)
408

    
409
  def ExpandNames(self):
410
    # This raises errors.OpPrereqError on its own:
411
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
412

    
413
    self.needed_locks = {
414
      locking.LEVEL_INSTANCE: [],
415
      locking.LEVEL_NODEGROUP: [self.group_uuid],
416
      }
417

    
418
    self.share_locks[locking.LEVEL_INSTANCE] = 1
419

    
420
  def DeclareLocks(self, level):
421
    if level == locking.LEVEL_INSTANCE:
422
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
423

    
424
      # Lock instances optimistically, needs verification once group lock has
425
      # been acquired
426
      self.needed_locks[locking.LEVEL_INSTANCE] = \
427
        self.cfg.GetInstanceNames(
428
          self.cfg.GetNodeGroupInstances(self.group_uuid))
429

    
430
  @staticmethod
431
  def _UpdateAndVerifyDiskParams(old, new):
432
    """Updates and verifies disk parameters.
433

434
    """
435
    new_params = GetUpdatedParams(old, new)
436
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
437
    return new_params
438

    
439
  def _CheckIpolicy(self, cluster, owned_instance_names):
440
    """Sanity checks for the ipolicy.
441

442
    @type cluster: C{objects.Cluster}
443
    @param cluster: the cluster's configuration
444
    @type owned_instance_names: list of string
445
    @param owned_instance_names: list of instances
446

447
    """
448
    if self.op.ipolicy:
449
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
450
                                           self.op.ipolicy,
451
                                           group_policy=True)
452

    
453
      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
454
      CheckIpolicyVsDiskTemplates(new_ipolicy,
455
                                  cluster.enabled_disk_templates)
456
      instances = \
457
        dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
458
      gmi = ganeti.masterd.instance
459
      violations = \
460
          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
461
                                                                 self.group),
462
                                       new_ipolicy, instances.values(),
463
                                       self.cfg)
464

    
465
      if violations:
466
        self.LogWarning("After the ipolicy change the following instances"
467
                        " violate them: %s",
468
                        utils.CommaJoin(violations))
469

    
470
  def CheckPrereq(self):
471
    """Check prerequisites.
472

473
    """
474
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
475

    
476
    # Check if locked instances are still correct
477
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
478

    
479
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
480
    cluster = self.cfg.GetClusterInfo()
481

    
482
    if self.group is None:
483
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
484
                               (self.op.group_name, self.group_uuid))
485

    
486
    if self.op.ndparams:
487
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
488
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
489
      self.new_ndparams = new_ndparams
490

    
491
    if self.op.diskparams:
492
      diskparams = self.group.diskparams
493
      uavdp = self._UpdateAndVerifyDiskParams
494
      # For each disktemplate subdict update and verify the values
495
      new_diskparams = dict((dt,
496
                             uavdp(diskparams.get(dt, {}),
497
                                   self.op.diskparams[dt]))
498
                            for dt in constants.DISK_TEMPLATES
499
                            if dt in self.op.diskparams)
500
      # As we've all subdicts of diskparams ready, lets merge the actual
501
      # dict with all updated subdicts
502
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
503
      try:
504
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
505
      except errors.OpPrereqError, err:
506
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
507
                                   errors.ECODE_INVAL)
508

    
509
    if self.op.hv_state:
510
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
511
                                                self.group.hv_state_static)
512

    
513
    if self.op.disk_state:
514
      self.new_disk_state = \
515
        MergeAndVerifyDiskState(self.op.disk_state,
516
                                self.group.disk_state_static)
517

    
518
    self._CheckIpolicy(cluster, owned_instance_names)
519

    
520
  def BuildHooksEnv(self):
521
    """Build hooks env.
522

523
    """
524
    return {
525
      "GROUP_NAME": self.op.group_name,
526
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
527
      }
528

    
529
  def BuildHooksNodes(self):
530
    """Build hooks nodes.
531

532
    """
533
    mn = self.cfg.GetMasterNode()
534
    return ([mn], [mn])
535

    
536
  def Exec(self, feedback_fn):
537
    """Modifies the node group.
538

539
    """
540
    result = []
541

    
542
    if self.op.ndparams:
543
      self.group.ndparams = self.new_ndparams
544
      result.append(("ndparams", str(self.group.ndparams)))
545

    
546
    if self.op.diskparams:
547
      self.group.diskparams = self.new_diskparams
548
      result.append(("diskparams", str(self.group.diskparams)))
549

    
550
    if self.op.alloc_policy:
551
      self.group.alloc_policy = self.op.alloc_policy
552

    
553
    if self.op.hv_state:
554
      self.group.hv_state_static = self.new_hv_state
555

    
556
    if self.op.disk_state:
557
      self.group.disk_state_static = self.new_disk_state
558

    
559
    if self.op.ipolicy:
560
      self.group.ipolicy = self.new_ipolicy
561

    
562
    self.cfg.Update(self.group, feedback_fn)
563
    return result
564

    
565

    
566
class LUGroupRemove(LogicalUnit):
567
  HPATH = "group-remove"
568
  HTYPE = constants.HTYPE_GROUP
569
  REQ_BGL = False
570

    
571
  def ExpandNames(self):
572
    # This will raises errors.OpPrereqError on its own:
573
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
574
    self.needed_locks = {
575
      locking.LEVEL_NODEGROUP: [self.group_uuid],
576
      }
577

    
578
  def CheckPrereq(self):
579
    """Check prerequisites.
580

581
    This checks that the given group name exists as a node group, that is
582
    empty (i.e., contains no nodes), and that is not the last group of the
583
    cluster.
584

585
    """
586
    # Verify that the group is empty.
587
    group_nodes = [node.uuid
588
                   for node in self.cfg.GetAllNodesInfo().values()
589
                   if node.group == self.group_uuid]
590

    
591
    if group_nodes:
592
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
593
                                 " nodes: %s" %
594
                                 (self.op.group_name,
595
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
596
                                 errors.ECODE_STATE)
597

    
598
    # Verify the cluster would not be left group-less.
599
    if len(self.cfg.GetNodeGroupList()) == 1:
600
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
601
                                 " removed" % self.op.group_name,
602
                                 errors.ECODE_STATE)
603

    
604
  def BuildHooksEnv(self):
605
    """Build hooks env.
606

607
    """
608
    return {
609
      "GROUP_NAME": self.op.group_name,
610
      }
611

    
612
  def BuildHooksNodes(self):
613
    """Build hooks nodes.
614

615
    """
616
    mn = self.cfg.GetMasterNode()
617
    return ([mn], [mn])
618

    
619
  def Exec(self, feedback_fn):
620
    """Remove the node group.
621

622
    """
623
    try:
624
      self.cfg.RemoveNodeGroup(self.group_uuid)
625
    except errors.ConfigurationError:
626
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
627
                               (self.op.group_name, self.group_uuid))
628

    
629
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
630

    
631

    
632
class LUGroupRename(LogicalUnit):
633
  HPATH = "group-rename"
634
  HTYPE = constants.HTYPE_GROUP
635
  REQ_BGL = False
636

    
637
  def ExpandNames(self):
638
    # This raises errors.OpPrereqError on its own:
639
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
640

    
641
    self.needed_locks = {
642
      locking.LEVEL_NODEGROUP: [self.group_uuid],
643
      }
644

    
645
  def CheckPrereq(self):
646
    """Check prerequisites.
647

648
    Ensures requested new name is not yet used.
649

650
    """
651
    try:
652
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
653
    except errors.OpPrereqError:
654
      pass
655
    else:
656
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
657
                                 " node group (UUID: %s)" %
658
                                 (self.op.new_name, new_name_uuid),
659
                                 errors.ECODE_EXISTS)
660

    
661
  def BuildHooksEnv(self):
662
    """Build hooks env.
663

664
    """
665
    return {
666
      "OLD_NAME": self.op.group_name,
667
      "NEW_NAME": self.op.new_name,
668
      }
669

    
670
  def BuildHooksNodes(self):
671
    """Build hooks nodes.
672

673
    """
674
    mn = self.cfg.GetMasterNode()
675

    
676
    all_nodes = self.cfg.GetAllNodesInfo()
677
    all_nodes.pop(mn, None)
678

    
679
    run_nodes = [mn]
680
    run_nodes.extend(node.uuid for node in all_nodes.values()
681
                     if node.group == self.group_uuid)
682

    
683
    return (run_nodes, run_nodes)
684

    
685
  def Exec(self, feedback_fn):
686
    """Rename the node group.
687

688
    """
689
    group = self.cfg.GetNodeGroup(self.group_uuid)
690

    
691
    if group is None:
692
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
693
                               (self.op.group_name, self.group_uuid))
694

    
695
    group.name = self.op.new_name
696
    self.cfg.Update(group, feedback_fn)
697

    
698
    return self.op.new_name
699

    
700

    
701
class LUGroupEvacuate(LogicalUnit):
702
  HPATH = "group-evacuate"
703
  HTYPE = constants.HTYPE_GROUP
704
  REQ_BGL = False
705

    
706
  def ExpandNames(self):
707
    # This raises errors.OpPrereqError on its own:
708
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
709

    
710
    if self.op.target_groups:
711
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
712
                                  self.op.target_groups)
713
    else:
714
      self.req_target_uuids = []
715

    
716
    if self.group_uuid in self.req_target_uuids:
717
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
718
                                 " as a target group (targets are %s)" %
719
                                 (self.group_uuid,
720
                                  utils.CommaJoin(self.req_target_uuids)),
721
                                 errors.ECODE_INVAL)
722

    
723
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
724

    
725
    self.share_locks = ShareAll()
726
    self.needed_locks = {
727
      locking.LEVEL_INSTANCE: [],
728
      locking.LEVEL_NODEGROUP: [],
729
      locking.LEVEL_NODE: [],
730
      }
731

    
732
  def DeclareLocks(self, level):
733
    if level == locking.LEVEL_INSTANCE:
734
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
735

    
736
      # Lock instances optimistically, needs verification once node and group
737
      # locks have been acquired
738
      self.needed_locks[locking.LEVEL_INSTANCE] = \
739
        self.cfg.GetInstanceNames(
740
          self.cfg.GetNodeGroupInstances(self.group_uuid))
741

    
742
    elif level == locking.LEVEL_NODEGROUP:
743
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
744

    
745
      if self.req_target_uuids:
746
        lock_groups = set([self.group_uuid] + self.req_target_uuids)
747

    
748
        # Lock all groups used by instances optimistically; this requires going
749
        # via the node before it's locked, requiring verification later on
750
        lock_groups.update(group_uuid
751
                           for instance_name in
752
                             self.owned_locks(locking.LEVEL_INSTANCE)
753
                           for group_uuid in
754
                             self.cfg.GetInstanceNodeGroups(
755
                               self.cfg.GetInstanceInfoByName(instance_name)
756
                                 .uuid))
757
      else:
758
        # No target groups, need to lock all of them
759
        lock_groups = locking.ALL_SET
760

    
761
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
762

    
763
    elif level == locking.LEVEL_NODE:
764
      # This will only lock the nodes in the group to be evacuated which
765
      # contain actual instances
766
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
767
      self._LockInstancesNodes()
768

    
769
      # Lock all nodes in group to be evacuated and target groups
770
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
771
      assert self.group_uuid in owned_groups
772
      member_node_uuids = [node_uuid
773
                           for group in owned_groups
774
                           for node_uuid in
775
                             self.cfg.GetNodeGroup(group).members]
776
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
777

    
778
  def CheckPrereq(self):
779
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
780
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
781
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
782

    
783
    assert owned_groups.issuperset(self.req_target_uuids)
784
    assert self.group_uuid in owned_groups
785

    
786
    # Check if locked instances are still correct
787
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
788

    
789
    # Get instance information
790
    self.instances = \
791
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
792

    
793
    # Check if node groups for locked instances are still correct
794
    CheckInstancesNodeGroups(self.cfg, self.instances,
795
                             owned_groups, owned_node_uuids, self.group_uuid)
796

    
797
    if self.req_target_uuids:
798
      # User requested specific target groups
799
      self.target_uuids = self.req_target_uuids
800
    else:
801
      # All groups except the one to be evacuated are potential targets
802
      self.target_uuids = [group_uuid for group_uuid in owned_groups
803
                           if group_uuid != self.group_uuid]
804

    
805
      if not self.target_uuids:
806
        raise errors.OpPrereqError("There are no possible target groups",
807
                                   errors.ECODE_INVAL)
808

    
809
  def BuildHooksEnv(self):
810
    """Build hooks env.
811

812
    """
813
    return {
814
      "GROUP_NAME": self.op.group_name,
815
      "TARGET_GROUPS": " ".join(self.target_uuids),
816
      }
817

    
818
  def BuildHooksNodes(self):
819
    """Build hooks nodes.
820

821
    """
822
    mn = self.cfg.GetMasterNode()
823

    
824
    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
825

    
826
    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
827

    
828
    return (run_nodes, run_nodes)
829

    
830
  def Exec(self, feedback_fn):
831
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))
832

    
833
    assert self.group_uuid not in self.target_uuids
834

    
835
    req = iallocator.IAReqGroupChange(instances=inst_names,
836
                                      target_groups=self.target_uuids)
837
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
838

    
839
    ial.Run(self.op.iallocator)
840

    
841
    if not ial.success:
842
      raise errors.OpPrereqError("Can't compute group evacuation using"
843
                                 " iallocator '%s': %s" %
844
                                 (self.op.iallocator, ial.info),
845
                                 errors.ECODE_NORES)
846

    
847
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
848

    
849
    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
850
                 len(jobs), self.op.group_name)
851

    
852
    return ResultWithJobs(jobs)
853

    
854

    
855
class LUGroupVerifyDisks(NoHooksLU):
856
  """Verifies the status of all disks in a node group.
857

858
  """
859
  REQ_BGL = False
860

    
861
  def ExpandNames(self):
862
    # Raises errors.OpPrereqError on its own if group can't be found
863
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
864

    
865
    self.share_locks = ShareAll()
866
    self.needed_locks = {
867
      locking.LEVEL_INSTANCE: [],
868
      locking.LEVEL_NODEGROUP: [],
869
      locking.LEVEL_NODE: [],
870

    
871
      # This opcode is acquires all node locks in a group. LUClusterVerifyDisks
872
      # starts one instance of this opcode for every group, which means all
873
      # nodes will be locked for a short amount of time, so it's better to
874
      # acquire the node allocation lock as well.
875
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
876
      }
877

    
878
  def DeclareLocks(self, level):
879
    if level == locking.LEVEL_INSTANCE:
880
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
881

    
882
      # Lock instances optimistically, needs verification once node and group
883
      # locks have been acquired
884
      self.needed_locks[locking.LEVEL_INSTANCE] = \
885
        self.cfg.GetInstanceNames(
886
          self.cfg.GetNodeGroupInstances(self.group_uuid))
887

    
888
    elif level == locking.LEVEL_NODEGROUP:
889
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
890

    
891
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
892
        set([self.group_uuid] +
893
            # Lock all groups used by instances optimistically; this requires
894
            # going via the node before it's locked, requiring verification
895
            # later on
896
            [group_uuid
897
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
898
             for group_uuid in
899
               self.cfg.GetInstanceNodeGroups(
900
                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])
901

    
902
    elif level == locking.LEVEL_NODE:
903
      # This will only lock the nodes in the group to be verified which contain
904
      # actual instances
905
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
906
      self._LockInstancesNodes()
907

    
908
      # Lock all nodes in group to be verified
909
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
910
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
911
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
912

    
913
  def CheckPrereq(self):
914
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
915
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
916
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
917

    
918
    assert self.group_uuid in owned_groups
919

    
920
    # Check if locked instances are still correct
921
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)
922

    
923
    # Get instance information
924
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))
925

    
926
    # Check if node groups for locked instances are still correct
927
    CheckInstancesNodeGroups(self.cfg, self.instances,
928
                             owned_groups, owned_node_uuids, self.group_uuid)
929

    
930
  def _VerifyInstanceLvs(self, node_errors, offline_disk_instance_names,
931
                         missing_disks):
932
    node_lv_to_inst = MapInstanceLvsToNodes(
933
      [inst for inst in self.instances.values() if inst.disks_active])
934
    if node_lv_to_inst:
935
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
936
                                  set(self.cfg.GetVmCapableNodeList()))
937

    
938
      node_lvs = self.rpc.call_lv_list(node_uuids, [])
939

    
940
      for (node_uuid, node_res) in node_lvs.items():
941
        if node_res.offline:
942
          continue
943

    
944
        msg = node_res.fail_msg
945
        if msg:
946
          logging.warning("Error enumerating LVs on node %s: %s",
947
                          self.cfg.GetNodeName(node_uuid), msg)
948
          node_errors[node_uuid] = msg
949
          continue
950

    
951
        for lv_name, (_, _, lv_online) in node_res.payload.items():
952
          inst = node_lv_to_inst.pop((node_uuid, lv_name), None)
953
          if not lv_online and inst is not None:
954
            offline_disk_instance_names.add(inst.name)
955

    
956
      # any leftover items in nv_dict are missing LVs, let's arrange the data
957
      # better
958
      for key, inst in node_lv_to_inst.iteritems():
959
        missing_disks.setdefault(inst.name, []).append(list(key))
960

    
961
  def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
962
    node_to_inst = {}
963
    for inst in self.instances.values():
964
      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
965
        continue
966

    
967
      for node_uuid in itertools.chain([inst.primary_node],
968
                                       inst.secondary_nodes):
969
        node_to_inst.setdefault(node_uuid, []).append(inst)
970

    
971
    for (node_uuid, insts) in node_to_inst.items():
972
      node_disks = [(inst.disks, inst) for inst in insts]
973
      node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
974
      msg = node_res.fail_msg
975
      if msg:
976
        logging.warning("Error getting DRBD status on node %s: %s",
977
                        self.cfg.GetNodeName(node_uuid), msg)
978
        node_errors[node_uuid] = msg
979
        continue
980

    
981
      faulty_disk_uuids = set(node_res.payload)
982
      for inst in self.instances.values():
983
        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
984
        if inst_disk_uuids.intersection(faulty_disk_uuids):
985
          offline_disk_instance_names.add(inst.name)
986

    
987
  def Exec(self, feedback_fn):
988
    """Verify integrity of cluster disks.
989

990
    @rtype: tuple of three items
991
    @return: a tuple of (dict of node-to-node_error, list of instances
992
        which need activate-disks, dict of instance: (node, volume) for
993
        missing volumes
994

995
    """
996
    node_errors = {}
997
    offline_disk_instance_names = set()
998
    missing_disks = {}
999

    
1000
    self._VerifyInstanceLvs(node_errors, offline_disk_instance_names,
1001
                            missing_disks)
1002
    self._VerifyDrbdStates(node_errors, offline_disk_instance_names)
1003

    
1004
    return (node_errors, list(offline_disk_instance_names), missing_disks)