Revision 4e7f986e

b/lib/cmdlib/cluster.py
@@ -941 +941 @@
       all_instances = self.cfg.GetAllInstancesInfo().values()
       violations = set()
       for group in self.cfg.GetAllNodeGroupsInfo().values():
-        instances = frozenset([inst for inst in all_instances
-                               if compat.any(nuuid in group.members
-                                             for nuuid in inst.all_nodes)])
+        instances = frozenset(
+          [inst for inst in all_instances
+           if compat.any(nuuid in group.members
+             for nuuid in self.cfg.GetInstanceNodes(inst))])
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
@@ -2006 +2007 @@
 
     for inst in self.my_inst_info.values():
       if inst.disk_template in constants.DTS_INT_MIRROR:
-        for nuuid in inst.all_nodes:
+        inst_nodes = self.cfg.GetInstanceNodes(inst)
+        for nuuid in inst_nodes:
           if self.all_node_info[nuuid].group != self.group_uuid:
             extra_lv_nodes.add(nuuid)
 
@@ -2360 +2362 @@
                   utils.CommaJoin(secondary_nodes),
                   code=self.ETYPE_WARNING)
 
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes)
+    inst_nodes = self.cfg.GetInstanceNodes(instance)
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
     if any(es_flags.values()):
       if instance.disk_template not in constants.DTS_EXCL_STORAGE:
         # Disk template not compatible with exclusive_storage: no instance
@@ -2381 +2384 @@
                       " gnt-cluster repair-disk-sizes", idx)
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
-      instance_nodes = utils.NiceSort(instance.all_nodes)
+      instance_nodes = utils.NiceSort(inst_nodes)
       instance_groups = {}
 
       for node_uuid in instance_nodes:
@@ -2418 +2421 @@
                   instance.name, "instance has offline secondary node(s) %s",
                   utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
     # ... or ghost/non-vm_capable nodes
-    for node_uuid in instance.all_nodes:
+    for node_uuid in inst_nodes:
       self._ErrorIf(node_image[node_uuid].ghost, constants.CV_EINSTANCEBADNODE,
                     instance.name, "instance lives on ghost node %s",
                     self.cfg.GetNodeName(node_uuid))
@@ -3118 +3121 @@
       instdisk[inst_uuid] = {}
 
     assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
-                      len(nuuids) <= len(instanceinfo[inst].all_nodes) and
+                      len(nuuids) <= len(
+                        self.cfg.GetInstanceNodes(instanceinfo[inst])) and
                       compat.all(isinstance(s, (tuple, list)) and
                                  len(s) == 2 for s in statuses)
                       for inst, nuuids in instdisk.items()
@@ -3315 +3319 @@
       if instance.admin_state == constants.ADMINST_OFFLINE:
         i_offline += 1
 
-      for nuuid in instance.all_nodes:
+      inst_nodes = self.cfg.GetInstanceNodes(instance)
+      for nuuid in inst_nodes:
         if nuuid not in node_image:
           gnode = self.NodeImage(uuid=nuuid)
           gnode.ghost = (nuuid not in self.all_node_info)
b/lib/cmdlib/common.py
@@ -579 +579 @@
   be_full = cfg.GetClusterInfo().FillBE(instance)
   mem_size = be_full[constants.BE_MAXMEM]
   cpu_count = be_full[constants.BE_VCPUS]
-  es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
+  inst_nodes = cfg.GetInstanceNodes(instance)
+  es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
   if any(es_flags.values()):
     # With exclusive storage use the actual spindles
     try:
@@ -821 +822 @@
 
   """
   for (uuid, inst) in instances.items():
-    assert owned_node_uuids.issuperset(inst.all_nodes), \
+    inst_nodes = cfg.GetInstanceNodes(inst)
+    assert owned_node_uuids.issuperset(inst_nodes), \
       "Instance %s's nodes changed while we kept the lock" % inst.name
 
     inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
b/lib/cmdlib/group.py
@@ -276 +276 @@
 
     self.cfg.AssignGroupNodes(mods)
 
-  @staticmethod
-  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
+  def CheckAssignmentForSplitInstances(self, changes, node_data, instance_data):
     """Check for split instances after a node assignment.
 
     This method considers a series of node assignments as an atomic operation,
@@ -310 +309 @@
       if inst.disk_template not in constants.DTS_INT_MIRROR:
         continue
 
+      inst_nodes = self.cfg.GetInstanceNodes(inst)
       if len(set(node_data[node_uuid].group
-                 for node_uuid in inst.all_nodes)) > 1:
+                 for node_uuid in inst_nodes)) > 1:
         previously_split_instances.add(inst.uuid)
 
       if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
-                 for node_uuid in inst.all_nodes)) > 1:
+                 for node_uuid in inst_nodes)) > 1:
         all_split_instances.add(inst.uuid)
 
     return (list(all_split_instances - previously_split_instances),
b/lib/cmdlib/instance.py
@@ -1571 +1571 @@
                                             False, self.op.reason)
       result.Raise("Could not start instance")
 
-    return self.cfg.GetNodeNames(list(iobj.all_nodes))
+    return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj)))
 
 
 class LUInstanceRename(LogicalUnit):
@@ -1604 +1604 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -1688 +1689 @@
     # update info on disks
     info = GetInstanceInfoText(renamed_inst)
     for (idx, disk) in enumerate(renamed_inst.disks):
-      for node_uuid in renamed_inst.all_nodes:
+      for node_uuid in self.cfg.GetInstanceNodes(renamed_inst):
         result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                 (disk, renamed_inst), info)
         result.Warn("Error setting info on node %s for disk %s" %
@@ -1745 +1746 @@
 
     """
     nl = [self.cfg.GetMasterNode()]
-    nl_post = list(self.instance.all_nodes) + nl
+    nl_post = list(self.cfg.GetInstanceNodes(self.instance)) + nl
     return (nl, nl_post)
 
   def CheckPrereq(self):
@@ -1778 +1779 @@
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
-    assert not (set(self.instance.all_nodes) -
+    assert not (set(self.cfg.GetInstanceNodes(self.instance)) -
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Not owning correct locks"
 
@@ -2622 +2623 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+        list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
@@ -2837 +2839 @@
     """
     self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
 
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodes(self.cfg,
-                                      self.instance.all_nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
       )
 
     # Check disk modifications. This is done here and not in CheckArguments
@@ -3003 +3005 @@
                                    errors.ECODE_STATE)
 
     assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
-    node_uuids = list(self.instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(self.instance))
     pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
 
     #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
@@ -3113 +3115 @@
         hvspecs = [(self.instance.hypervisor,
                     self.cfg.GetClusterInfo()
                       .hvparams[self.instance.hypervisor])]
-        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
+        _CheckNodesPhysicalCPUs(self, self.cfg.GetInstanceNodes(self.instance),
                                 max_requested_cpu + 1,
                                 hvspecs)
 
@@ -3682 +3684 @@
 
     if self.op.disk_template:
       if __debug__:
-        check_nodes = set(self.instance.all_nodes)
+        check_nodes = set(self.cfg.GetInstanceNodes(self.instance))
         if self.op.remote_node_uuid:
           check_nodes.add(self.op.remote_node_uuid)
         for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
@@ -3842 +3844 @@
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
 
     # Check if node groups for locked instance are still correct
-    assert owned_nodes.issuperset(self.instance.all_nodes), \
+    assert owned_nodes.issuperset(self.cfg.GetInstanceNodes(self.instance)), \
       ("Instance %s's nodes changed while we kept the lock" %
        self.op.instance_name)
 
b/lib/cmdlib/instance_operation.py
@@ -86 +86 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+        list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -108 +109 @@
       filled_hvp.update(self.op.hvparams)
       hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
-      CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
-                    filled_hvp)
+      CheckHVParams(self, self.cfg.GetInstanceNodes(self.instance),
+                    self.instance.hypervisor, filled_hvp)
 
     CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
 
@@ -198 +199 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -271 +273 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -308 +311 @@
     else:
       instance_os = instance.os
 
-    node_uuids = list(instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(instance))
 
     self.op.osparams = self.op.osparams or {}
     self.op.osparams_private = self.op.osparams_private or {}
@@ -385 +388 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
b/lib/cmdlib/instance_query.py
@@ -203 +203 @@
 
     cluster = self.cfg.GetClusterInfo()
 
-    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i)
+                                   for i in self.wanted_instances))
     nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))
 
     groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
b/lib/cmdlib/instance_storage.py
@@ -226 +226 @@
   info = GetInstanceInfoText(instance)
   if target_node_uuid is None:
     pnode_uuid = instance.primary_node
-    all_node_uuids = instance.all_nodes
+    all_node_uuids = lu.cfg.GetInstanceNodes(instance, disks=disks)
   else:
     pnode_uuid = target_node_uuid
     all_node_uuids = [pnode_uuid]
@@ -610 +610 @@
 
     ial.Run(self.op.iallocator)
 
-    assert req.RequiredNodes() == len(self.instance.all_nodes)
+    assert req.RequiredNodes() == len(self.cfg.GetInstanceNodes(self.instance))
 
     if not ial.success:
       raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
@@ -711 +711 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -724 +725 @@
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     if self.op.node_uuids:
-      if len(self.op.node_uuids) != len(instance.all_nodes):
+      inst_nodes = self.cfg.GetInstanceNodes(instance)
+      if len(self.op.node_uuids) != len(inst_nodes):
         raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                    " %d replacement nodes were specified" %
-                                   (instance.name, len(instance.all_nodes),
+                                   (instance.name, len(inst_nodes),
                                     len(self.op.node_uuids)),
                                    errors.ECODE_INVAL)
       assert instance.disk_template != constants.DT_DRBD8 or \
@@ -789 +791 @@
     if self.op.node_uuids:
       node_uuids = self.op.node_uuids
     else:
-      node_uuids = instance.all_nodes
+      node_uuids = self.cfg.GetInstanceNodes(instance)
     excl_stor = compat.any(
       rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
       )
@@ -854 +856 @@
 
     # All touched nodes must be locked
     mylocks = self.owned_locks(locking.LEVEL_NODE)
-    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
+    assert mylocks.issuperset(frozenset(inst_nodes))
     new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
 
     # TODO: Release node locks before wiping, or explain why it's not possible
@@ -1401 +1404 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -1413 +1417 @@
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    node_uuids = list(self.instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(self.instance))
     for node_uuid in node_uuids:
      CheckNodeOnline(self, node_uuid)
     self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
@@ -1474 +1478 @@
                  utils.FormatUnit(self.target, "h")))
 
     # First run all grow ops in dry-run mode
-    for node_uuid in self.instance.all_nodes:
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
+    for node_uuid in inst_nodes:
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, True, True,
@@ -1506 +1511 @@
 
     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real on the backing storage
-    for node_uuid in self.instance.all_nodes:
+    for node_uuid in inst_nodes:
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, False, True,
@@ -1926 +1931 @@
     @return: True if they are activated, False otherwise
 
     """
-    node_uuids = instance.all_nodes
+    node_uuids = self.cfg.GetInstanceNodes(instance)
 
     for idx, dev in enumerate(instance.disks):
       for node_uuid in node_uuids:
b/lib/cmdlib/node.py
@@ -486 +486 @@
 
     """
     return (instance.disk_template in constants.DTS_INT_MIRROR and
-            self.op.node_uuid in instance.all_nodes)
+            self.op.node_uuid in self.cfg.GetInstanceNodes(instance))
 
   def ExpandNames(self):
     if self.lock_all:
@@ -870 +870 @@
 
   """
 
-  return _GetNodeInstancesInner(cfg, lambda inst: node_uuid in inst.all_nodes)
+  return _GetNodeInstancesInner(cfg,
+                                lambda inst: node_uuid in
+                                  cfg.GetInstanceNodes(inst))
 
 
 class LUNodeEvacuate(NoHooksLU):
@@ -1462 +1464 @@
                                  " node is required", errors.ECODE_INVAL)
 
     for _, instance in self.cfg.GetAllInstancesInfo().items():
-      if node.uuid in instance.all_nodes:
+      if node.uuid in self.cfg.GetInstanceNodes(instance):
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance.name,
                                    errors.ECODE_INVAL)
@@ -1563 +1565 @@
     for inst in _GetNodeInstances(self.cfg, self.op.node_uuid):
       if not inst.disks_active:
         continue
-      check_nodes = set(inst.all_nodes)
+      check_nodes = set(self.cfg.GetInstanceNodes(inst))
       check_nodes.discard(self.op.node_uuid)
       for inst_node_uuid in check_nodes:
         self._CheckFaultyDisks(inst, inst_node_uuid)
b/lib/config.py
@@ -1808 +1808 @@
     if primary_only:
       nodes = [instance.primary_node]
     else:
-      nodes = instance.all_nodes
+      nodes = self._UnlockedGetInstanceNodes(instance)
 
     return frozenset(self._UnlockedGetNodeInfo(node_uuid).group
                      for node_uuid in nodes)
@@ -2057 +2057 @@
     if primary_only:
       nodes_fn = lambda inst: [inst.primary_node]
     else:
-      nodes_fn = lambda inst: inst.all_nodes
+      nodes_fn = self._UnlockedGetInstanceNodes
 
     return frozenset(inst.uuid
                      for inst in self._config_data.instances.values()
b/lib/objects.py
@@ -1108 +1108 @@
     "serial_no",
     ] + _TIMESTAMPS + _UUID
 
-  def _ComputeAllNodes(self):
-    """Compute the list of all nodes.
-
-    Since the data is already there (in the drbd disks), keeping it as
-    a separate normal attribute is redundant and if not properly
-    synchronised can cause problems. Thus it's better to compute it
-    dynamically.
-
-    """
-    def _Helper(nodes, device):
-      """Recursively computes nodes given a top device."""
-      if device.dev_type in constants.DTS_DRBD:
-        nodea, nodeb = device.logical_id[:2]
-        nodes.add(nodea)
-        nodes.add(nodeb)
-      if device.children:
-        for child in device.children:
-          _Helper(nodes, child)
-
-    all_nodes = set()
-    all_nodes.add(self.primary_node)
-    for device in self.disks:
-      _Helper(all_nodes, device)
-    return tuple(all_nodes)
-
-  all_nodes = property(_ComputeAllNodes, None, None,
-                       "List of names of all the nodes of the instance")
-
   def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
     """Provide a mapping of nodes to LVs this instance owns.
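The implementation of GetInstanceNodes itself is not shown in this revision; only its call sites and the removal of the all_nodes property appear above. As a rough sketch of what the config-side replacement presumably looks like, reusing the logic just removed from lib/objects.py and assuming the shared-lock wrapper convention used by other ConfigWriter read methods (the disks keyword is inferred from the call in lib/cmdlib/instance_storage.py that passes disks=disks):

  def _UnlockedGetInstanceNodes(self, instance, disks=None):
    """Sketch: return the UUIDs of all nodes holding the given disks."""
    if disks is None:
      disks = instance.disks

    def _Helper(nodes, device):
      # DRBD devices carry their two peer node UUIDs in logical_id
      if device.dev_type in constants.DTS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      for child in (device.children or []):
        _Helper(nodes, child)

    all_nodes = set([instance.primary_node])
    for device in disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceNodes(self, instance, disks=None):
    """Sketch: lock-protected public wrapper around the helper above."""
    return self._UnlockedGetInstanceNodes(instance, disks=disks)

Computing the node list in ConfigWriter rather than on the Instance object keeps the node bookkeeping next to the configuration, and lets a caller pass a subset of disks to obtain only the nodes those disks live on.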

  
b/test/py/cmdlib/cmdlib_unittest.py
@@ -588 +588 @@
       constants.ND_EXCLUSIVE_STORAGE: self.excl_stor,
       }
 
+  def GetInstanceNodes(self, instance):
+    return tuple(instance.primary_node)
+
 
 class TestComputeIPolicyInstanceViolation(unittest.TestCase):
   def test(self):
@@ -599 +602 @@
     disks = [objects.Disk(size=512, spindles=13)]
     cfg = _FakeConfigForComputeIPolicyInstanceViolation(beparams, False)
     instance = objects.Instance(beparams=beparams, disks=disks, nics=[],
+                                primary_node="pnode_uuid",
                                 disk_template=constants.DT_PLAIN)
     stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
                                             constants.DT_PLAIN)
@@ -606 +610 @@
                                                  cfg, _compute_fn=stub)
     self.assertEqual(ret, [])
     instance2 = objects.Instance(beparams={}, disks=disks, nics=[],
+                                 primary_node="pnode_uuid",
                                  disk_template=constants.DT_PLAIN)
     ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance2,
                                                  cfg, _compute_fn=stub)
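The new stub and the explicit primary_node arguments are needed because the code path exercised by ComputeIPolicyInstanceViolation now asks the configuration object it is handed, rather than the instance, for the node list (see the lib/cmdlib/common.py hunk at line 579). A hypothetical, minimal fake illustrating the same dependency:

  class _FakeConfigWithInstanceNodes:
    """Illustrative test double, not part of the revision."""

    def GetInstanceNodes(self, instance):
      # One-element tuple containing the primary node's UUID. Note that
      # tuple(instance.primary_node), as in the stub above, would instead
      # yield a tuple of the individual characters of that string.
      return (instance.primary_node,)

The revision itself extends the existing _FakeConfigForComputeIPolicyInstanceViolation class rather than adding a new fake.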
