Revision 4f758333

b/lib/cmdlib/cluster.py
       all_instances = self.cfg.GetAllInstancesInfo().values()
       violations = set()
       for group in self.cfg.GetAllNodeGroupsInfo().values():
-        instances = frozenset([inst for inst in all_instances
-                               if compat.any(nuuid in group.members
-                                             for nuuid in inst.all_nodes)])
+        instances = frozenset(
+          [inst for inst in all_instances
+           if compat.any(nuuid in group.members
+             for nuuid in self.cfg.GetInstanceNodes(inst))])
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
......
 
     for inst in self.my_inst_info.values():
       if inst.disk_template in constants.DTS_INT_MIRROR:
-        for nuuid in inst.all_nodes:
+        inst_nodes = self.cfg.GetInstanceNodes(inst)
+        for nuuid in inst_nodes:
           if self.all_node_info[nuuid].group != self.group_uuid:
             extra_lv_nodes.add(nuuid)
 
......
                   utils.CommaJoin(secondary_nodes),
                   code=self.ETYPE_WARNING)
 
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes)
+    inst_nodes = self.cfg.GetInstanceNodes(instance)
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
     if any(es_flags.values()):
       if instance.disk_template not in constants.DTS_EXCL_STORAGE:
         # Disk template not compatible with exclusive_storage: no instance
......
                       " gnt-cluster repair-disk-sizes", idx)
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
-      instance_nodes = utils.NiceSort(instance.all_nodes)
+      instance_nodes = utils.NiceSort(inst_nodes)
       instance_groups = {}
 
       for node_uuid in instance_nodes:
......
                   instance.name, "instance has offline secondary node(s) %s",
                   utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
     # ... or ghost/non-vm_capable nodes
-    for node_uuid in instance.all_nodes:
+    for node_uuid in inst_nodes:
       self._ErrorIf(node_image[node_uuid].ghost, constants.CV_EINSTANCEBADNODE,
                     instance.name, "instance lives on ghost node %s",
                     self.cfg.GetNodeName(node_uuid))
......
       instdisk[inst_uuid] = {}
 
     assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
-                      len(nuuids) <= len(instanceinfo[inst].all_nodes) and
+                      len(nuuids) <= len(
+                        self.cfg.GetInstanceNodes(instanceinfo[inst])) and
                       compat.all(isinstance(s, (tuple, list)) and
                                  len(s) == 2 for s in statuses)
                       for inst, nuuids in instdisk.items()
......
       if instance.admin_state == constants.ADMINST_OFFLINE:
         i_offline += 1
 
-      for nuuid in instance.all_nodes:
+      inst_nodes = self.cfg.GetInstanceNodes(instance)
+      for nuuid in inst_nodes:
         if nuuid not in node_image:
           gnode = self.NodeImage(uuid=nuuid)
           gnode.ghost = (nuuid not in self.all_node_info)
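The call-site pattern in these hunks repeats throughout the revision: node lists are no longer read from the all_nodes property on the Instance object but asked from the cluster configuration. A minimal, self-contained sketch of that pattern; FakeConfig and FakeInstance below are stand-ins for illustration, not Ganeti classes:

# Sketch only: old style read inst.all_nodes (a property on the object),
# new style asks the configuration object for the node list.
class FakeInstance(object):
  def __init__(self, primary_node, secondary_nodes):
    self.primary_node = primary_node
    self.secondary_nodes = secondary_nodes

class FakeConfig(object):
  def GetInstanceNodes(self, inst):
    """Primary node first, then any mirror secondaries."""
    return (inst.primary_node,) + tuple(inst.secondary_nodes)

cfg = FakeConfig()
inst = FakeInstance("node-A", ["node-B"])
assert cfg.GetInstanceNodes(inst) == ("node-A", "node-B")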
b/lib/cmdlib/common.py
   be_full = cfg.GetClusterInfo().FillBE(instance)
   mem_size = be_full[constants.BE_MAXMEM]
   cpu_count = be_full[constants.BE_VCPUS]
-  es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
+  inst_nodes = cfg.GetInstanceNodes(instance)
+  es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
   if any(es_flags.values()):
     # With exclusive storage use the actual spindles
     try:
......
 
   """
   for (uuid, inst) in instances.items():
-    assert owned_node_uuids.issuperset(inst.all_nodes), \
+    inst_nodes = cfg.GetInstanceNodes(inst)
+    assert owned_node_uuids.issuperset(inst_nodes), \
       "Instance %s's nodes changed while we kept the lock" % inst.name
 
     inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
b/lib/cmdlib/group.py
 
     self.cfg.AssignGroupNodes(mods)
 
-  @staticmethod
-  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
+  def CheckAssignmentForSplitInstances(self, changes, node_data, instance_data):
     """Check for split instances after a node assignment.
 
     This method considers a series of node assignments as an atomic operation,
......
       if inst.disk_template not in constants.DTS_INT_MIRROR:
         continue
 
+      inst_nodes = self.cfg.GetInstanceNodes(inst)
       if len(set(node_data[node_uuid].group
-                 for node_uuid in inst.all_nodes)) > 1:
+                 for node_uuid in inst_nodes)) > 1:
         previously_split_instances.add(inst.uuid)
 
       if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
-                 for node_uuid in inst.all_nodes)) > 1:
+                 for node_uuid in inst_nodes)) > 1:
         all_split_instances.add(inst.uuid)
 
     return (list(all_split_instances - previously_split_instances),
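The split-instance check above counts how many distinct node groups an instance's nodes would span after the proposed assignments. A standalone sketch of that idea; the function name and data shapes are illustrative, not the LUGroupAssignNodes internals:

# Illustrative only: an instance is "split" when its nodes end up in more
# than one node group. changed_groups maps node -> proposed new group,
# current_groups maps node -> current group.
def is_split(inst_nodes, current_groups, changed_groups=None):
  changed_groups = changed_groups or {}
  groups = set(changed_groups.get(n, current_groups[n]) for n in inst_nodes)
  return len(groups) > 1

current = {"node-A": "group1", "node-B": "group1"}
print(is_split(["node-A", "node-B"], current))                        # False
print(is_split(["node-A", "node-B"], current, {"node-B": "group2"}))  # True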
b/lib/cmdlib/instance.py
                                             False, self.op.reason)
       result.Raise("Could not start instance")
 
-    return self.cfg.GetNodeNames(list(iobj.all_nodes))
+    return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj)))
 
 
 class LUInstanceRename(LogicalUnit):
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
......
     # update info on disks
     info = GetInstanceInfoText(renamed_inst)
     for (idx, disk) in enumerate(renamed_inst.disks):
-      for node_uuid in renamed_inst.all_nodes:
+      for node_uuid in self.cfg.GetInstanceNodes(renamed_inst):
         result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                 (disk, renamed_inst), info)
         result.Warn("Error setting info on node %s for disk %s" %
......
 
     """
     nl = [self.cfg.GetMasterNode()]
-    nl_post = list(self.instance.all_nodes) + nl
+    nl_post = list(self.cfg.GetInstanceNodes(self.instance)) + nl
     return (nl, nl_post)
 
   def CheckPrereq(self):
......
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
-    assert not (set(self.instance.all_nodes) -
+    assert not (set(self.cfg.GetInstanceNodes(self.instance)) -
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Not owning correct locks"
 
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+        list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
......
     """
     self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
 
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodes(self.cfg,
-                                      self.instance.all_nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
       )
 
     # Check disk modifications. This is done here and not in CheckArguments
......
                                    errors.ECODE_STATE)
 
     assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
-    node_uuids = list(self.instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(self.instance))
     pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
 
     #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
......
         hvspecs = [(self.instance.hypervisor,
                     self.cfg.GetClusterInfo()
                       .hvparams[self.instance.hypervisor])]
-        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
+        _CheckNodesPhysicalCPUs(self, self.cfg.GetInstanceNodes(self.instance),
                                 max_requested_cpu + 1,
                                 hvspecs)
 
......
 
     if self.op.disk_template:
       if __debug__:
-        check_nodes = set(self.instance.all_nodes)
+        check_nodes = set(self.cfg.GetInstanceNodes(self.instance))
         if self.op.remote_node_uuid:
           check_nodes.add(self.op.remote_node_uuid)
         for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
......
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
 
     # Check if node groups for locked instance are still correct
-    assert owned_nodes.issuperset(self.instance.all_nodes), \
+    assert owned_nodes.issuperset(self.cfg.GetInstanceNodes(self.instance)), \
       ("Instance %s's nodes changed while we kept the lock" %
        self.op.instance_name)
 
b/lib/cmdlib/instance_operation.py
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+        list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
......
       filled_hvp.update(self.op.hvparams)
       hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
-      CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
-                    filled_hvp)
+      CheckHVParams(self, self.cfg.GetInstanceNodes(self.instance),
+                    self.instance.hypervisor, filled_hvp)
 
     CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
 
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
......
 
   def _MergeValidateOsParams(self, instance):
     "Handle the OS parameter merging and validation for the target instance."
-    node_uuids = list(instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(instance))
 
     self.op.osparams = self.op.osparams or {}
     self.op.osparams_private = self.op.osparams_private or {}
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
b/lib/cmdlib/instance_query.py
 
     cluster = self.cfg.GetClusterInfo()
 
-    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i)
+                                   for i in self.wanted_instances))
     nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))
 
     groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
b/lib/cmdlib/instance_storage.py
   info = GetInstanceInfoText(instance)
   if target_node_uuid is None:
     pnode_uuid = instance.primary_node
-    all_node_uuids = instance.all_nodes
+    all_node_uuids = lu.cfg.GetInstanceNodes(instance, disks=disks)
   else:
     pnode_uuid = target_node_uuid
     all_node_uuids = [pnode_uuid]
......
 
     ial.Run(self.op.iallocator)
 
-    assert req.RequiredNodes() == len(self.instance.all_nodes)
+    assert req.RequiredNodes() == len(self.cfg.GetInstanceNodes(self.instance))
 
     if not ial.success:
       raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
......
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     if self.op.node_uuids:
-      if len(self.op.node_uuids) != len(instance.all_nodes):
+      inst_nodes = self.cfg.GetInstanceNodes(instance)
+      if len(self.op.node_uuids) != len(inst_nodes):
         raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                    " %d replacement nodes were specified" %
-                                   (instance.name, len(instance.all_nodes),
+                                   (instance.name, len(inst_nodes),
                                     len(self.op.node_uuids)),
                                    errors.ECODE_INVAL)
       assert instance.disk_template != constants.DT_DRBD8 or \
......
     if self.op.node_uuids:
       node_uuids = self.op.node_uuids
     else:
-      node_uuids = instance.all_nodes
+      node_uuids = self.cfg.GetInstanceNodes(instance)
     excl_stor = compat.any(
       rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
       )
......
 
     # All touched nodes must be locked
     mylocks = self.owned_locks(locking.LEVEL_NODE)
-    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
+    assert mylocks.issuperset(frozenset(inst_nodes))
     new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
 
     # TODO: Release node locks before wiping, or explain why it's not possible
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
......
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    node_uuids = list(self.instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(self.instance))
     for node_uuid in node_uuids:
       CheckNodeOnline(self, node_uuid)
     self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
......
                  utils.FormatUnit(self.target, "h")))
 
     # First run all grow ops in dry-run mode
-    for node_uuid in self.instance.all_nodes:
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
+    for node_uuid in inst_nodes:
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, True, True,
......
 
     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real on the backing storage
-    for node_uuid in self.instance.all_nodes:
+    for node_uuid in inst_nodes:
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, False, True,
......
     @return: True if they are activated, False otherwise
 
     """
-    node_uuids = instance.all_nodes
+    node_uuids = self.cfg.GetInstanceNodes(instance)
 
     for idx, dev in enumerate(instance.disks):
       for node_uuid in node_uuids:
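The disk-grow hunks keep the existing two-pass structure: the grow is first attempted in dry-run mode on every node of the instance, and only then repeated for real. A standalone sketch of that pattern; grow_on_node below is a hypothetical stand-in for the per-node RPC, not the Ganeti RPC layer:

# Illustrative two-pass pattern: validate the operation everywhere in
# dry-run mode before making any real change.
def grow_disk_everywhere(node_uuids, delta, grow_on_node):
  # Pass 1: dry run on every node; a failure here aborts before any change.
  for node in node_uuids:
    grow_on_node(node, delta, dryrun=True)
  # Pass 2: the real grow, now that all nodes accepted the request.
  for node in node_uuids:
    grow_on_node(node, delta, dryrun=False)

def fake_grow(node, delta, dryrun):
  print("%s: grow by %d MiB (dryrun=%s)" % (node, delta, dryrun))

grow_disk_everywhere(["node-A", "node-B"], 1024, fake_grow)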
b/lib/cmdlib/node.py
 
     """
     return (instance.disk_template in constants.DTS_INT_MIRROR and
-            self.op.node_uuid in instance.all_nodes)
+            self.op.node_uuid in self.cfg.GetInstanceNodes(instance))
 
   def ExpandNames(self):
     if self.lock_all:
......
 
   """
 
-  return _GetNodeInstancesInner(cfg, lambda inst: node_uuid in inst.all_nodes)
+  return _GetNodeInstancesInner(cfg,
+                                lambda inst: node_uuid in
+                                  cfg.GetInstanceNodes(inst))
 
 
 class LUNodeEvacuate(NoHooksLU):
......
                                  " node is required", errors.ECODE_INVAL)
 
     for _, instance in self.cfg.GetAllInstancesInfo().items():
-      if node.uuid in instance.all_nodes:
+      if node.uuid in self.cfg.GetInstanceNodes(instance):
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance.name,
                                    errors.ECODE_INVAL)
......
     for inst in _GetNodeInstances(self.cfg, self.op.node_uuid):
       if not inst.disks_active:
         continue
-      check_nodes = set(inst.all_nodes)
+      check_nodes = set(self.cfg.GetInstanceNodes(inst))
       check_nodes.discard(self.op.node_uuid)
       for inst_node_uuid in check_nodes:
         self._CheckFaultyDisks(inst, inst_node_uuid)
b/lib/config.py
     if primary_only:
       nodes = [instance.primary_node]
     else:
-      nodes = instance.all_nodes
+      nodes = self._UnlockedGetInstanceNodes(instance)
 
     return frozenset(self._UnlockedGetNodeInfo(node_uuid).group
                      for node_uuid in nodes)
......
     if primary_only:
       nodes_fn = lambda inst: [inst.primary_node]
     else:
-      nodes_fn = lambda inst: inst.all_nodes
+      nodes_fn = self._UnlockedGetInstanceNodes
 
     return frozenset(inst.uuid
                      for inst in self._config_data.instances.values()
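Both config.py hunks keep the same primary_only switch: look at the primary node alone, or at the full node list, then map node UUIDs to their groups. A small self-contained sketch of that lookup, with plain dicts standing in for the ConfigWriter internals shown in the diff:

# Illustrative only: resolving the node groups an instance touches.
# node_groups maps node uuid -> group uuid, mirroring the
# _UnlockedGetNodeInfo(...).group lookup above.
def instance_node_groups(inst_nodes, primary_node, node_groups,
                         primary_only=False):
  nodes = [primary_node] if primary_only else inst_nodes
  return frozenset(node_groups[n] for n in nodes)

node_groups = {"node-A": "group1", "node-B": "group2"}
print(instance_node_groups(["node-A", "node-B"], "node-A", node_groups))
# frozenset({'group1', 'group2'})
print(instance_node_groups(["node-A", "node-B"], "node-A", node_groups,
                           primary_only=True))
# frozenset({'group1'})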
b/lib/objects.py
     "serial_no",
     ] + _TIMESTAMPS + _UUID
 
-  def _ComputeAllNodes(self):
-    """Compute the list of all nodes.
-
-    Since the data is already there (in the drbd disks), keeping it as
-    a separate normal attribute is redundant and if not properly
-    synchronised can cause problems. Thus it's better to compute it
-    dynamically.
-
-    """
-    def _Helper(nodes, device):
-      """Recursively computes nodes given a top device."""
-      if device.dev_type in constants.DTS_DRBD:
-        nodea, nodeb = device.logical_id[:2]
-        nodes.add(nodea)
-        nodes.add(nodeb)
-      if device.children:
-        for child in device.children:
-          _Helper(nodes, child)
-
-    all_nodes = set()
-    for device in self.disks:
-      _Helper(all_nodes, device)
-    # ensure that the primary node is always the first
-    all_nodes.discard(self.primary_node)
-    return (self.primary_node, ) + tuple(all_nodes)
-
-  all_nodes = property(_ComputeAllNodes, None, None,
-                       "List of names of all the nodes of the instance")
-
   def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
     """Provide a mapping of nodes to LVs this instance owns.
 
b/test/py/cmdlib/cmdlib_unittest.py
       constants.ND_EXCLUSIVE_STORAGE: self.excl_stor,
       }
 
+  def GetInstanceNodes(self, instance):
+    return tuple(instance.primary_node)
+
 
 class TestComputeIPolicyInstanceViolation(unittest.TestCase):
   def test(self):
......
     disks = [objects.Disk(size=512, spindles=13)]
     cfg = _FakeConfigForComputeIPolicyInstanceViolation(beparams, False)
     instance = objects.Instance(beparams=beparams, disks=disks, nics=[],
+                                primary_node="pnode_uuid",
                                 disk_template=constants.DT_PLAIN)
     stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
                                             constants.DT_PLAIN)
......
                                                  cfg, _compute_fn=stub)
     self.assertEqual(ret, [])
     instance2 = objects.Instance(beparams={}, disks=disks, nics=[],
+                                 primary_node="pnode_uuid",
                                  disk_template=constants.DT_PLAIN)
     ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance2,
                                                  cfg, _compute_fn=stub)
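On the test side, the fake configuration object grows a GetInstanceNodes method and the test instances gain a primary_node. A hedged sketch of how such a fake can be exercised in isolation; the classes below are stand-ins for illustration, not the helpers defined in this file, and the stub here deliberately returns a one-element tuple containing the primary node UUID:

import unittest

class FakeInstance(object):
  def __init__(self, primary_node):
    self.primary_node = primary_node

class FakeConfig(object):
  def GetInstanceNodes(self, instance):
    # Single-node (plain disk) instance: just the primary node.
    return (instance.primary_node,)

class TestFakeConfig(unittest.TestCase):
  def test(self):
    cfg = FakeConfig()
    inst = FakeInstance("pnode_uuid")
    self.assertEqual(cfg.GetInstanceNodes(inst), ("pnode_uuid",))

if __name__ == "__main__":
  unittest.main()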
