Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / group.py @ 2ff6426b

History | View | Annotate | Download (32.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with node groups."""
23

    
24
import itertools
25
import logging
26

    
27
from ganeti import constants
28
from ganeti import errors
29
from ganeti import locking
30
from ganeti import objects
31
from ganeti import utils
32
from ganeti.masterd import iallocator
33
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, ResultWithJobs
34
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
35
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
36
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
37
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
38
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
39
  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
40
  CheckDiskAccessModeConsistency, OpConnectInstanceCommunicationNetwork
41

    
42
import ganeti.masterd.instance
43

    
44

    
45
class LUGroupAdd(LogicalUnit):
46
  """Logical unit for creating node groups.
47

48
  """
49
  HPATH = "group-add"
50
  HTYPE = constants.HTYPE_GROUP
51
  REQ_BGL = False
52

    
53
  def ExpandNames(self):
54
    # We need the new group's UUID here so that we can create and acquire the
55
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
56
    # that it should not check whether the UUID exists in the configuration.
57
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
58
    self.needed_locks = {}
59
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
60

    
61
  def _CheckIpolicy(self):
62
    """Checks the group's ipolicy for consistency and validity.
63

64
    """
65
    if self.op.ipolicy:
66
      cluster = self.cfg.GetClusterInfo()
67
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
68
      try:
69
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
70
      except errors.ConfigurationError, err:
71
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
72
                                   errors.ECODE_INVAL)
73
      CheckIpolicyVsDiskTemplates(full_ipolicy,
74
                                  cluster.enabled_disk_templates)
75

    
76
  def CheckPrereq(self):
77
    """Check prerequisites.
78

79
    This checks that the given group name is not an existing node group
80
    already.
81

82
    """
83
    try:
84
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
85
    except errors.OpPrereqError:
86
      pass
87
    else:
88
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
89
                                 " node group (UUID: %s)" %
90
                                 (self.op.group_name, existing_uuid),
91
                                 errors.ECODE_EXISTS)
92

    
93
    if self.op.ndparams:
94
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
95

    
96
    if self.op.hv_state:
97
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
98
    else:
99
      self.new_hv_state = None
100

    
101
    if self.op.disk_state:
102
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
103
    else:
104
      self.new_disk_state = None
105

    
106
    if self.op.diskparams:
107
      for templ in constants.DISK_TEMPLATES:
108
        if templ in self.op.diskparams:
109
          utils.ForceDictType(self.op.diskparams[templ],
110
                              constants.DISK_DT_TYPES)
111
      self.new_diskparams = self.op.diskparams
112
      try:
113
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
114
      except errors.OpPrereqError, err:
115
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
116
                                   errors.ECODE_INVAL)
117
    else:
118
      self.new_diskparams = {}
119

    
120
    self._CheckIpolicy()
121

    
122
  def BuildHooksEnv(self):
123
    """Build hooks env.
124

125
    """
126
    return {
127
      "GROUP_NAME": self.op.group_name,
128
      }
129

    
130
  def BuildHooksNodes(self):
131
    """Build hooks nodes.
132

133
    """
134
    mn = self.cfg.GetMasterNode()
135
    return ([mn], [mn])
136

    
137
  @staticmethod
138
  def _ConnectInstanceCommunicationNetwork(cfg, group_uuid, network_name):
139
    """Connect a node group to the instance communication network.
140

141
    The group is connected to the instance communication network via
142
    the Opcode 'OpNetworkConnect'.
143

144
    @type cfg: L{ganeti.config.ConfigWriter}
145
    @param cfg: Ganeti configuration
146

147
    @type group_uuid: string
148
    @param group_uuid: UUID of the group to connect
149

150
    @type network_name: string
151
    @param network_name: name of the network to connect to
152

153
    @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
154
    @return: L{ganeti.cmdlib.ResultWithJobs} if the group needs to be
155
             connected, otherwise (the group is already connected)
156
             L{None}
157

158
    """
159
    try:
160
      cfg.LookupNetwork(network_name)
161
      network_exists = True
162
    except errors.OpPrereqError:
163
      network_exists = False
164

    
165
    if network_exists:
166
      op = OpConnectInstanceCommunicationNetwork(group_uuid, network_name)
167
      return ResultWithJobs([[op]])
168
    else:
169
      return None
170

    
171
  def Exec(self, feedback_fn):
172
    """Add the node group to the cluster.
173

174
    """
175
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
176
                                  uuid=self.group_uuid,
177
                                  alloc_policy=self.op.alloc_policy,
178
                                  ndparams=self.op.ndparams,
179
                                  diskparams=self.new_diskparams,
180
                                  ipolicy=self.op.ipolicy,
181
                                  hv_state_static=self.new_hv_state,
182
                                  disk_state_static=self.new_disk_state)
183

    
184
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
185
    del self.remove_locks[locking.LEVEL_NODEGROUP]
186

    
187
    network_name = self.cfg.GetClusterInfo().instance_communication_network
188
    if network_name:
189
      return self._ConnectInstanceCommunicationNetwork(self.cfg,
190
                                                       self.group_uuid,
191
                                                       network_name)
192

    
193

    
194
class LUGroupAssignNodes(NoHooksLU):
195
  """Logical unit for assigning nodes to groups.
196

197
  """
198
  REQ_BGL = False
199

    
200
  def ExpandNames(self):
201
    # These raise errors.OpPrereqError on their own:
202
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
203
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
204

    
205
    # We want to lock all the affected nodes and groups. We have readily
206
    # available the list of nodes, and the *destination* group. To gather the
207
    # list of "source" groups, we need to fetch node information later on.
208
    self.needed_locks = {
209
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
210
      locking.LEVEL_NODE: self.op.node_uuids,
211
      }
212

    
213
  def DeclareLocks(self, level):
214
    if level == locking.LEVEL_NODEGROUP:
215
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
216

    
217
      # Try to get all affected nodes' groups without having the group or node
218
      # lock yet. Needs verification later in the code flow.
219
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)
220

    
221
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
222

    
223
  def CheckPrereq(self):
224
    """Check prerequisites.
225

226
    """
227
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
228
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
229
            frozenset(self.op.node_uuids))
230

    
231
    expected_locks = (set([self.group_uuid]) |
232
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
233
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
234
    if actual_locks != expected_locks:
235
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
236
                               " current groups are '%s', used to be '%s'" %
237
                               (utils.CommaJoin(expected_locks),
238
                                utils.CommaJoin(actual_locks)))
239

    
240
    self.node_data = self.cfg.GetAllNodesInfo()
241
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
242
    instance_data = self.cfg.GetAllInstancesInfo()
243

    
244
    if self.group is None:
245
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
246
                               (self.op.group_name, self.group_uuid))
247

    
248
    (new_splits, previous_splits) = \
249
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
250
                                             for uuid in self.op.node_uuids],
251
                                            self.node_data, instance_data)
252

    
253
    if new_splits:
254
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
255
                         self.cfg.GetInstanceNames(new_splits)))
256

    
257
      if not self.op.force:
258
        raise errors.OpExecError("The following instances get split by this"
259
                                 " change and --force was not given: %s" %
260
                                 fmt_new_splits)
261
      else:
262
        self.LogWarning("This operation will split the following instances: %s",
263
                        fmt_new_splits)
264

    
265
        if previous_splits:
266
          self.LogWarning("In addition, these already-split instances continue"
267
                          " to be split across groups: %s",
268
                          utils.CommaJoin(utils.NiceSort(
269
                            self.cfg.GetInstanceNames(previous_splits))))
270

    
271
  def Exec(self, feedback_fn):
272
    """Assign nodes to a new group.
273

274
    """
275
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]
276

    
277
    self.cfg.AssignGroupNodes(mods)
278

    
279
  @staticmethod
280
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
281
    """Check for split instances after a node assignment.
282

283
    This method considers a series of node assignments as an atomic operation,
284
    and returns information about split instances after applying the set of
285
    changes.
286

287
    In particular, it returns information about newly split instances, and
288
    instances that were already split, and remain so after the change.
289

290
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
291
    considered.
292

293
    @type changes: list of (node_uuid, new_group_uuid) pairs.
294
    @param changes: list of node assignments to consider.
295
    @param node_data: a dict with data for all nodes
296
    @param instance_data: a dict with all instances to consider
297
    @rtype: a two-tuple
298
    @return: a list of instances that were previously okay and result split as a
299
      consequence of this change, and a list of instances that were previously
300
      split and this change does not fix.
301

302
    """
303
    changed_nodes = dict((uuid, group) for uuid, group in changes
304
                         if node_data[uuid].group != group)
305

    
306
    all_split_instances = set()
307
    previously_split_instances = set()
308

    
309
    for inst in instance_data.values():
310
      if inst.disk_template not in constants.DTS_INT_MIRROR:
311
        continue
312

    
313
      if len(set(node_data[node_uuid].group
314
                 for node_uuid in inst.all_nodes)) > 1:
315
        previously_split_instances.add(inst.uuid)
316

    
317
      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
318
                 for node_uuid in inst.all_nodes)) > 1:
319
        all_split_instances.add(inst.uuid)
320

    
321
    return (list(all_split_instances - previously_split_instances),
322
            list(previously_split_instances & all_split_instances))
323

    
324

    
325
class LUGroupSetParams(LogicalUnit):
326
  """Modifies the parameters of a node group.
327

328
  """
329
  HPATH = "group-modify"
330
  HTYPE = constants.HTYPE_GROUP
331
  REQ_BGL = False
332

    
333
  def CheckArguments(self):
334
    all_changes = [
335
      self.op.ndparams,
336
      self.op.diskparams,
337
      self.op.alloc_policy,
338
      self.op.hv_state,
339
      self.op.disk_state,
340
      self.op.ipolicy,
341
      ]
342

    
343
    if all_changes.count(None) == len(all_changes):
344
      raise errors.OpPrereqError("Please pass at least one modification",
345
                                 errors.ECODE_INVAL)
346

    
347
    if self.op.diskparams:
348
      CheckDiskAccessModeValidity(self.op.diskparams)
349

    
350
  def ExpandNames(self):
351
    # This raises errors.OpPrereqError on its own:
352
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
353

    
354
    self.needed_locks = {
355
      locking.LEVEL_INSTANCE: [],
356
      locking.LEVEL_NODEGROUP: [self.group_uuid],
357
      }
358

    
359
    self.share_locks[locking.LEVEL_INSTANCE] = 1
360

    
361
  def DeclareLocks(self, level):
362
    if level == locking.LEVEL_INSTANCE:
363
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
364

    
365
      # Lock instances optimistically, needs verification once group lock has
366
      # been acquired
367
      self.needed_locks[locking.LEVEL_INSTANCE] = \
368
        self.cfg.GetInstanceNames(
369
          self.cfg.GetNodeGroupInstances(self.group_uuid))
370

    
371
  @staticmethod
372
  def _UpdateAndVerifyDiskParams(old, new):
373
    """Updates and verifies disk parameters.
374

375
    """
376
    new_params = GetUpdatedParams(old, new)
377
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
378
    return new_params
379

    
380
  def _CheckIpolicy(self, cluster, owned_instance_names):
381
    """Sanity checks for the ipolicy.
382

383
    @type cluster: C{objects.Cluster}
384
    @param cluster: the cluster's configuration
385
    @type owned_instance_names: list of string
386
    @param owned_instance_names: list of instances
387

388
    """
389
    if self.op.ipolicy:
390
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
391
                                           self.op.ipolicy,
392
                                           group_policy=True)
393

    
394
      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
395
      CheckIpolicyVsDiskTemplates(new_ipolicy,
396
                                  cluster.enabled_disk_templates)
397
      instances = \
398
        dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
399
      gmi = ganeti.masterd.instance
400
      violations = \
401
          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
402
                                                                 self.group),
403
                                       new_ipolicy, instances.values(),
404
                                       self.cfg)
405

    
406
      if violations:
407
        self.LogWarning("After the ipolicy change the following instances"
408
                        " violate them: %s",
409
                        utils.CommaJoin(violations))
410

    
411
  def CheckPrereq(self):
412
    """Check prerequisites.
413

414
    """
415
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
416

    
417
    # Check if locked instances are still correct
418
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
419

    
420
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
421
    cluster = self.cfg.GetClusterInfo()
422

    
423
    if self.group is None:
424
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
425
                               (self.op.group_name, self.group_uuid))
426

    
427
    if self.op.ndparams:
428
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
429
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
430
      self.new_ndparams = new_ndparams
431

    
432
    if self.op.diskparams:
433
      diskparams = self.group.diskparams
434
      uavdp = self._UpdateAndVerifyDiskParams
435
      # For each disktemplate subdict update and verify the values
436
      new_diskparams = dict((dt,
437
                             uavdp(diskparams.get(dt, {}),
438
                                   self.op.diskparams[dt]))
439
                            for dt in constants.DISK_TEMPLATES
440
                            if dt in self.op.diskparams)
441
      # As we've all subdicts of diskparams ready, lets merge the actual
442
      # dict with all updated subdicts
443
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
444

    
445
      try:
446
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
447
        CheckDiskAccessModeConsistency(self.new_diskparams, self.cfg,
448
                                       group=self.group)
449
      except errors.OpPrereqError, err:
450
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
451
                                   errors.ECODE_INVAL)
452

    
453
    if self.op.hv_state:
454
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
455
                                                self.group.hv_state_static)
456

    
457
    if self.op.disk_state:
458
      self.new_disk_state = \
459
        MergeAndVerifyDiskState(self.op.disk_state,
460
                                self.group.disk_state_static)
461

    
462
    self._CheckIpolicy(cluster, owned_instance_names)
463

    
464
  def BuildHooksEnv(self):
465
    """Build hooks env.
466

467
    """
468
    return {
469
      "GROUP_NAME": self.op.group_name,
470
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
471
      }
472

    
473
  def BuildHooksNodes(self):
474
    """Build hooks nodes.
475

476
    """
477
    mn = self.cfg.GetMasterNode()
478
    return ([mn], [mn])
479

    
480
  def Exec(self, feedback_fn):
481
    """Modifies the node group.
482

483
    """
484
    result = []
485

    
486
    if self.op.ndparams:
487
      self.group.ndparams = self.new_ndparams
488
      result.append(("ndparams", str(self.group.ndparams)))
489

    
490
    if self.op.diskparams:
491
      self.group.diskparams = self.new_diskparams
492
      result.append(("diskparams", str(self.group.diskparams)))
493

    
494
    if self.op.alloc_policy:
495
      self.group.alloc_policy = self.op.alloc_policy
496

    
497
    if self.op.hv_state:
498
      self.group.hv_state_static = self.new_hv_state
499

    
500
    if self.op.disk_state:
501
      self.group.disk_state_static = self.new_disk_state
502

    
503
    if self.op.ipolicy:
504
      self.group.ipolicy = self.new_ipolicy
505

    
506
    self.cfg.Update(self.group, feedback_fn)
507
    return result
508

    
509

    
510
class LUGroupRemove(LogicalUnit):
511
  HPATH = "group-remove"
512
  HTYPE = constants.HTYPE_GROUP
513
  REQ_BGL = False
514

    
515
  def ExpandNames(self):
516
    # This will raises errors.OpPrereqError on its own:
517
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
518
    self.needed_locks = {
519
      locking.LEVEL_NODEGROUP: [self.group_uuid],
520
      }
521

    
522
  def CheckPrereq(self):
523
    """Check prerequisites.
524

525
    This checks that the given group name exists as a node group, that is
526
    empty (i.e., contains no nodes), and that is not the last group of the
527
    cluster.
528

529
    """
530
    # Verify that the group is empty.
531
    group_nodes = [node.uuid
532
                   for node in self.cfg.GetAllNodesInfo().values()
533
                   if node.group == self.group_uuid]
534

    
535
    if group_nodes:
536
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
537
                                 " nodes: %s" %
538
                                 (self.op.group_name,
539
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
540
                                 errors.ECODE_STATE)
541

    
542
    # Verify the cluster would not be left group-less.
543
    if len(self.cfg.GetNodeGroupList()) == 1:
544
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
545
                                 " removed" % self.op.group_name,
546
                                 errors.ECODE_STATE)
547

    
548
  def BuildHooksEnv(self):
549
    """Build hooks env.
550

551
    """
552
    return {
553
      "GROUP_NAME": self.op.group_name,
554
      }
555

    
556
  def BuildHooksNodes(self):
557
    """Build hooks nodes.
558

559
    """
560
    mn = self.cfg.GetMasterNode()
561
    return ([mn], [mn])
562

    
563
  def Exec(self, feedback_fn):
564
    """Remove the node group.
565

566
    """
567
    try:
568
      self.cfg.RemoveNodeGroup(self.group_uuid)
569
    except errors.ConfigurationError:
570
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
571
                               (self.op.group_name, self.group_uuid))
572

    
573
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
574

    
575

    
576
class LUGroupRename(LogicalUnit):
577
  HPATH = "group-rename"
578
  HTYPE = constants.HTYPE_GROUP
579
  REQ_BGL = False
580

    
581
  def ExpandNames(self):
582
    # This raises errors.OpPrereqError on its own:
583
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
584

    
585
    self.needed_locks = {
586
      locking.LEVEL_NODEGROUP: [self.group_uuid],
587
      }
588

    
589
  def CheckPrereq(self):
590
    """Check prerequisites.
591

592
    Ensures requested new name is not yet used.
593

594
    """
595
    try:
596
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
597
    except errors.OpPrereqError:
598
      pass
599
    else:
600
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
601
                                 " node group (UUID: %s)" %
602
                                 (self.op.new_name, new_name_uuid),
603
                                 errors.ECODE_EXISTS)
604

    
605
  def BuildHooksEnv(self):
606
    """Build hooks env.
607

608
    """
609
    return {
610
      "OLD_NAME": self.op.group_name,
611
      "NEW_NAME": self.op.new_name,
612
      }
613

    
614
  def BuildHooksNodes(self):
615
    """Build hooks nodes.
616

617
    """
618
    mn = self.cfg.GetMasterNode()
619

    
620
    all_nodes = self.cfg.GetAllNodesInfo()
621
    all_nodes.pop(mn, None)
622

    
623
    run_nodes = [mn]
624
    run_nodes.extend(node.uuid for node in all_nodes.values()
625
                     if node.group == self.group_uuid)
626

    
627
    return (run_nodes, run_nodes)
628

    
629
  def Exec(self, feedback_fn):
630
    """Rename the node group.
631

632
    """
633
    group = self.cfg.GetNodeGroup(self.group_uuid)
634

    
635
    if group is None:
636
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
637
                               (self.op.group_name, self.group_uuid))
638

    
639
    group.name = self.op.new_name
640
    self.cfg.Update(group, feedback_fn)
641

    
642
    return self.op.new_name
643

    
644

    
645
class LUGroupEvacuate(LogicalUnit):
646
  HPATH = "group-evacuate"
647
  HTYPE = constants.HTYPE_GROUP
648
  REQ_BGL = False
649

    
650
  def ExpandNames(self):
651
    # This raises errors.OpPrereqError on its own:
652
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
653

    
654
    if self.op.target_groups:
655
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
656
                                  self.op.target_groups)
657
    else:
658
      self.req_target_uuids = []
659

    
660
    if self.group_uuid in self.req_target_uuids:
661
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
662
                                 " as a target group (targets are %s)" %
663
                                 (self.group_uuid,
664
                                  utils.CommaJoin(self.req_target_uuids)),
665
                                 errors.ECODE_INVAL)
666

    
667
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
668

    
669
    self.share_locks = ShareAll()
670
    self.needed_locks = {
671
      locking.LEVEL_INSTANCE: [],
672
      locking.LEVEL_NODEGROUP: [],
673
      locking.LEVEL_NODE: [],
674
      }
675

    
676
  def DeclareLocks(self, level):
677
    if level == locking.LEVEL_INSTANCE:
678
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
679

    
680
      # Lock instances optimistically, needs verification once node and group
681
      # locks have been acquired
682
      self.needed_locks[locking.LEVEL_INSTANCE] = \
683
        self.cfg.GetInstanceNames(
684
          self.cfg.GetNodeGroupInstances(self.group_uuid))
685

    
686
    elif level == locking.LEVEL_NODEGROUP:
687
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
688

    
689
      if self.req_target_uuids:
690
        lock_groups = set([self.group_uuid] + self.req_target_uuids)
691

    
692
        # Lock all groups used by instances optimistically; this requires going
693
        # via the node before it's locked, requiring verification later on
694
        lock_groups.update(group_uuid
695
                           for instance_name in
696
                             self.owned_locks(locking.LEVEL_INSTANCE)
697
                           for group_uuid in
698
                             self.cfg.GetInstanceNodeGroups(
699
                               self.cfg.GetInstanceInfoByName(instance_name)
700
                                 .uuid))
701
      else:
702
        # No target groups, need to lock all of them
703
        lock_groups = locking.ALL_SET
704

    
705
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
706

    
707
    elif level == locking.LEVEL_NODE:
708
      # This will only lock the nodes in the group to be evacuated which
709
      # contain actual instances
710
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
711
      self._LockInstancesNodes()
712

    
713
      # Lock all nodes in group to be evacuated and target groups
714
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
715
      assert self.group_uuid in owned_groups
716
      member_node_uuids = [node_uuid
717
                           for group in owned_groups
718
                           for node_uuid in
719
                             self.cfg.GetNodeGroup(group).members]
720
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
721

    
722
  def CheckPrereq(self):
723
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
724
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
725
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
726

    
727
    assert owned_groups.issuperset(self.req_target_uuids)
728
    assert self.group_uuid in owned_groups
729

    
730
    # Check if locked instances are still correct
731
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
732

    
733
    # Get instance information
734
    self.instances = \
735
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
736

    
737
    # Check if node groups for locked instances are still correct
738
    CheckInstancesNodeGroups(self.cfg, self.instances,
739
                             owned_groups, owned_node_uuids, self.group_uuid)
740

    
741
    if self.req_target_uuids:
742
      # User requested specific target groups
743
      self.target_uuids = self.req_target_uuids
744
    else:
745
      # All groups except the one to be evacuated are potential targets
746
      self.target_uuids = [group_uuid for group_uuid in owned_groups
747
                           if group_uuid != self.group_uuid]
748

    
749
      if not self.target_uuids:
750
        raise errors.OpPrereqError("There are no possible target groups",
751
                                   errors.ECODE_INVAL)
752

    
753
  def BuildHooksEnv(self):
754
    """Build hooks env.
755

756
    """
757
    return {
758
      "GROUP_NAME": self.op.group_name,
759
      "TARGET_GROUPS": " ".join(self.target_uuids),
760
      }
761

    
762
  def BuildHooksNodes(self):
763
    """Build hooks nodes.
764

765
    """
766
    mn = self.cfg.GetMasterNode()
767

    
768
    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
769

    
770
    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
771

    
772
    return (run_nodes, run_nodes)
773

    
774
  def Exec(self, feedback_fn):
775
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))
776

    
777
    assert self.group_uuid not in self.target_uuids
778

    
779
    req = iallocator.IAReqGroupChange(instances=inst_names,
780
                                      target_groups=self.target_uuids)
781
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
782

    
783
    ial.Run(self.op.iallocator)
784

    
785
    if not ial.success:
786
      raise errors.OpPrereqError("Can't compute group evacuation using"
787
                                 " iallocator '%s': %s" %
788
                                 (self.op.iallocator, ial.info),
789
                                 errors.ECODE_NORES)
790

    
791
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
792

    
793
    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
794
                 len(jobs), self.op.group_name)
795

    
796
    return ResultWithJobs(jobs)
797

    
798

    
799
class LUGroupVerifyDisks(NoHooksLU):
800
  """Verifies the status of all disks in a node group.
801

802
  """
803
  REQ_BGL = False
804

    
805
  def ExpandNames(self):
806
    # Raises errors.OpPrereqError on its own if group can't be found
807
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
808

    
809
    self.share_locks = ShareAll()
810
    self.needed_locks = {
811
      locking.LEVEL_INSTANCE: [],
812
      locking.LEVEL_NODEGROUP: [],
813
      locking.LEVEL_NODE: [],
814

    
815
      # This opcode is acquires all node locks in a group. LUClusterVerifyDisks
816
      # starts one instance of this opcode for every group, which means all
817
      # nodes will be locked for a short amount of time, so it's better to
818
      # acquire the node allocation lock as well.
819
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
820
      }
821

    
822
  def DeclareLocks(self, level):
823
    if level == locking.LEVEL_INSTANCE:
824
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
825

    
826
      # Lock instances optimistically, needs verification once node and group
827
      # locks have been acquired
828
      self.needed_locks[locking.LEVEL_INSTANCE] = \
829
        self.cfg.GetInstanceNames(
830
          self.cfg.GetNodeGroupInstances(self.group_uuid))
831

    
832
    elif level == locking.LEVEL_NODEGROUP:
833
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
834

    
835
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
836
        set([self.group_uuid] +
837
            # Lock all groups used by instances optimistically; this requires
838
            # going via the node before it's locked, requiring verification
839
            # later on
840
            [group_uuid
841
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
842
             for group_uuid in
843
               self.cfg.GetInstanceNodeGroups(
844
                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])
845

    
846
    elif level == locking.LEVEL_NODE:
847
      # This will only lock the nodes in the group to be verified which contain
848
      # actual instances
849
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
850
      self._LockInstancesNodes()
851

    
852
      # Lock all nodes in group to be verified
853
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
854
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
855
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
856

    
857
  def CheckPrereq(self):
858
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
859
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
860
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
861

    
862
    assert self.group_uuid in owned_groups
863

    
864
    # Check if locked instances are still correct
865
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)
866

    
867
    # Get instance information
868
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))
869

    
870
    # Check if node groups for locked instances are still correct
871
    CheckInstancesNodeGroups(self.cfg, self.instances,
872
                             owned_groups, owned_node_uuids, self.group_uuid)
873

    
874
  def _VerifyInstanceLvs(self, node_errors, offline_disk_instance_names,
875
                         missing_disks):
876
    node_lv_to_inst = MapInstanceLvsToNodes(
877
      [inst for inst in self.instances.values() if inst.disks_active])
878
    if node_lv_to_inst:
879
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
880
                                  set(self.cfg.GetVmCapableNodeList()))
881

    
882
      node_lvs = self.rpc.call_lv_list(node_uuids, [])
883

    
884
      for (node_uuid, node_res) in node_lvs.items():
885
        if node_res.offline:
886
          continue
887

    
888
        msg = node_res.fail_msg
889
        if msg:
890
          logging.warning("Error enumerating LVs on node %s: %s",
891
                          self.cfg.GetNodeName(node_uuid), msg)
892
          node_errors[node_uuid] = msg
893
          continue
894

    
895
        for lv_name, (_, _, lv_online) in node_res.payload.items():
896
          inst = node_lv_to_inst.pop((node_uuid, lv_name), None)
897
          if not lv_online and inst is not None:
898
            offline_disk_instance_names.add(inst.name)
899

    
900
      # any leftover items in nv_dict are missing LVs, let's arrange the data
901
      # better
902
      for key, inst in node_lv_to_inst.iteritems():
903
        missing_disks.setdefault(inst.name, []).append(list(key))
904

    
905
  def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
906
    node_to_inst = {}
907
    for inst in self.instances.values():
908
      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
909
        continue
910

    
911
      for node_uuid in itertools.chain([inst.primary_node],
912
                                       inst.secondary_nodes):
913
        node_to_inst.setdefault(node_uuid, []).append(inst)
914

    
915
    for (node_uuid, insts) in node_to_inst.items():
916
      node_disks = [(inst.disks, inst) for inst in insts]
917
      node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
918
      msg = node_res.fail_msg
919
      if msg:
920
        logging.warning("Error getting DRBD status on node %s: %s",
921
                        self.cfg.GetNodeName(node_uuid), msg)
922
        node_errors[node_uuid] = msg
923
        continue
924

    
925
      faulty_disk_uuids = set(node_res.payload)
926
      for inst in self.instances.values():
927
        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
928
        if inst_disk_uuids.intersection(faulty_disk_uuids):
929
          offline_disk_instance_names.add(inst.name)
930

    
931
  def Exec(self, feedback_fn):
932
    """Verify integrity of cluster disks.
933

934
    @rtype: tuple of three items
935
    @return: a tuple of (dict of node-to-node_error, list of instances
936
        which need activate-disks, dict of instance: (node, volume) for
937
        missing volumes
938

939
    """
940
    node_errors = {}
941
    offline_disk_instance_names = set()
942
    missing_disks = {}
943

    
944
    self._VerifyInstanceLvs(node_errors, offline_disk_instance_names,
945
                            missing_disks)
946
    self._VerifyDrbdStates(node_errors, offline_disk_instance_names)
947

    
948
    return (node_errors, list(offline_disk_instance_names), missing_disks)