Revision 5eacbcae lib/cmdlib/group.py

--- a/lib/cmdlib/group.py
+++ b/lib/cmdlib/group.py
@@ -31,13 +31,13 @@
 from ganeti import query
 from ganeti import utils
 from ganeti.masterd import iallocator
-from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase, \
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
   ResultWithJobs
-from ganeti.cmdlib.common import _MergeAndVerifyHvState, \
-  _MergeAndVerifyDiskState, _GetWantedNodes, _GetUpdatedParams, \
-  _CheckNodeGroupInstances, _GetUpdatedIPolicy, \
-  _ComputeNewInstanceViolations, _GetDefaultIAllocator, _ShareAll, \
-  _CheckInstancesNodeGroups, _LoadNodeEvacResult, _MapInstanceDisksToNodes
+from ganeti.cmdlib.common import MergeAndVerifyHvState, \
+  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
+  CheckNodeGroupInstances, GetUpdatedIPolicy, \
+  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
+  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceDisksToNodes
 
 import ganeti.masterd.instance
 
@@ -79,12 +79,12 @@
       utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
 
     if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
     else:
       self.new_hv_state = None
 
     if self.op.disk_state:
-      self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
     else:
       self.new_disk_state = None
 
@@ -152,7 +152,7 @@
   def ExpandNames(self):
     # These raise errors.OpPrereqError on their own:
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+    self.op.nodes = GetWantedNodes(self, self.op.nodes)
 
     # We want to lock all the affected nodes and groups. We have readily
     # available the list of nodes, and the *destination* group. To gather the
@@ -276,7 +276,7 @@
             list(previously_split_instances & all_split_instances))
 
 
-class _GroupQuery(_QueryBase):
+class GroupQuery(QueryBase):
   FIELDS = query.GROUP_FIELDS
 
   def ExpandNames(self, lu):
@@ -363,7 +363,7 @@
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
+    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                           self.op.output_fields, False)
 
   def ExpandNames(self):
@@ -423,7 +423,7 @@
     """Updates and verifies disk parameters.
 
     """
-    new_params = _GetUpdatedParams(old, new)
+    new_params = GetUpdatedParams(old, new)
     utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
     return new_params
 
@@ -434,7 +434,7 @@
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
 
     # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     cluster = self.cfg.GetClusterInfo()
@@ -444,7 +444,7 @@
                                (self.op.group_name, self.group_uuid))
 
     if self.op.ndparams:
-      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
+      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
       self.new_ndparams = new_ndparams
 
@@ -467,27 +467,27 @@
                                    errors.ECODE_INVAL)
 
     if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
-                                                 self.group.hv_state_static)
+      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+                                                self.group.hv_state_static)
 
     if self.op.disk_state:
       self.new_disk_state = \
-        _MergeAndVerifyDiskState(self.op.disk_state,
-                                 self.group.disk_state_static)
+        MergeAndVerifyDiskState(self.op.disk_state,
+                                self.group.disk_state_static)
 
     if self.op.ipolicy:
-      self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
-                                            self.op.ipolicy,
-                                            group_policy=True)
+      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
+                                           self.op.ipolicy,
+                                           group_policy=True)
 
       new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
       inst_filter = lambda inst: inst.name in owned_instances
       instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
       gmi = ganeti.masterd.instance
       violations = \
-          _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
-                                                                  self.group),
-                                        new_ipolicy, instances, self.cfg)
+          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
+                                                                 self.group),
+                                       new_ipolicy, instances, self.cfg)
 
       if violations:
         self.LogWarning("After the ipolicy change the following instances"
@@ -697,9 +697,9 @@
                                   utils.CommaJoin(self.req_target_uuids)),
                                  errors.ECODE_INVAL)
 
-    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
 
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
@@ -757,14 +757,14 @@
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
     self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
-    _CheckInstancesNodeGroups(self.cfg, self.instances,
-                              owned_groups, owned_nodes, self.group_uuid)
+    CheckInstancesNodeGroups(self.cfg, self.instances,
+                             owned_groups, owned_nodes, self.group_uuid)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -816,7 +816,7 @@
                                  (self.op.iallocator, ial.info),
                                  errors.ECODE_NORES)
 
-    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
 
     self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                  len(jobs), self.op.group_name)
@@ -834,7 +834,7 @@
     # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
@@ -887,14 +887,14 @@
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
     self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
-    _CheckInstancesNodeGroups(self.cfg, self.instances,
-                              owned_groups, owned_nodes, self.group_uuid)
+    CheckInstancesNodeGroups(self.cfg, self.instances,
+                             owned_groups, owned_nodes, self.group_uuid)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -909,7 +909,7 @@
     res_instances = set()
     res_missing = {}
 
-    nv_dict = _MapInstanceDisksToNodes(
+    nv_dict = MapInstanceDisksToNodes(
       [inst for inst in self.instances.values()
        if inst.admin_state == constants.ADMINST_UP])
 
