Revision 6ccce5d4
b/lib/cmdlib/base.py | ||
---|---|---|
365 | 365 |
for _, instance in self.cfg.GetMultiInstanceInfoByName(locked_i): |
366 | 366 |
wanted_node_uuids.append(instance.primary_node) |
367 | 367 |
if not primary_only: |
368 |
wanted_node_uuids.extend(instance.secondary_nodes)
|
|
368 |
wanted_node_uuids.extend(self.cfg.GetInstanceSecondaryNodes(instance))
|
|
369 | 369 |
|
370 | 370 |
if self.recalculate_locks[level] == constants.LOCKS_REPLACE: |
371 | 371 |
self.needed_locks[level] = wanted_node_uuids |
b/lib/cmdlib/cluster.py | ||
---|---|---|
1957 | 1957 |
# Important: access only the instances whose lock is owned |
1958 | 1958 |
instance = self.cfg.GetInstanceInfoByName(inst_name) |
1959 | 1959 |
if instance.disk_template in constants.DTS_INT_MIRROR: |
1960 |
nodes.update(instance.secondary_nodes)
|
|
1960 |
nodes.update(self.cfg.GetInstanceSecondaryNodes(instance))
|
|
1961 | 1961 |
|
1962 | 1962 |
self.needed_locks[locking.LEVEL_NODE] = nodes |
1963 | 1963 |
|
... | ... | |
2353 | 2353 |
"instance %s, connection to primary node failed", |
2354 | 2354 |
instance.name) |
2355 | 2355 |
|
2356 |
self._ErrorIf(len(instance.secondary_nodes) > 1, |
|
2356 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance) |
|
2357 |
self._ErrorIf(len(secondary_nodes) > 1, |
|
2357 | 2358 |
constants.CV_EINSTANCELAYOUT, instance.name, |
2358 | 2359 |
"instance has multiple secondary nodes: %s", |
2359 |
utils.CommaJoin(instance.secondary_nodes),
|
|
2360 |
utils.CommaJoin(secondary_nodes), |
|
2360 | 2361 |
code=self.ETYPE_WARNING) |
2361 | 2362 |
|
2362 | 2363 |
es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes) |
... | ... | |
2402 | 2403 |
code=self.ETYPE_WARNING) |
2403 | 2404 |
|
2404 | 2405 |
inst_nodes_offline = [] |
2405 |
for snode in instance.secondary_nodes:
|
|
2406 |
for snode in secondary_nodes: |
|
2406 | 2407 |
s_img = node_image[snode] |
2407 | 2408 |
self._ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC, |
2408 | 2409 |
self.cfg.GetNodeName(snode), |
... | ... | |
3325 | 3326 |
pnode = instance.primary_node |
3326 | 3327 |
node_image[pnode].pinst.append(instance.uuid) |
3327 | 3328 |
|
3328 |
for snode in instance.secondary_nodes:
|
|
3329 |
for snode in self.cfg.GetInstanceSecondaryNodes(instance):
|
|
3329 | 3330 |
nimg = node_image[snode] |
3330 | 3331 |
nimg.sinst.append(instance.uuid) |
3331 | 3332 |
if pnode not in nimg.sbp: |
... | ... | |
3514 | 3515 |
# is secondary for an instance whose primary is in another group. To avoid |
3515 | 3516 |
# them, we find these instances and add their volumes to node_vol_should. |
3516 | 3517 |
for instance in self.all_inst_info.values(): |
3517 |
for secondary in instance.secondary_nodes:
|
|
3518 |
for secondary in self.cfg.GetInstanceSecondaryNodes(instance):
|
|
3518 | 3519 |
if (secondary in self.my_node_info |
3519 | 3520 |
and instance.name not in self.my_inst_info): |
3520 | 3521 |
instance.MapLVsByNode(node_vol_should) |
b/lib/cmdlib/group.py | ||
---|---|---|
908 | 908 |
if not inst.disks_active or inst.disk_template != constants.DT_DRBD8: |
909 | 909 |
continue |
910 | 910 |
|
911 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(inst) |
|
911 | 912 |
for node_uuid in itertools.chain([inst.primary_node], |
912 |
inst.secondary_nodes):
|
|
913 |
secondary_nodes): |
|
913 | 914 |
node_to_inst.setdefault(node_uuid, []).append(inst) |
914 | 915 |
|
915 | 916 |
for (node_uuid, insts) in node_to_inst.items(): |
b/lib/cmdlib/instance.py | ||
---|---|---|
3146 | 3146 |
mem_check_list = [pnode_uuid] |
3147 | 3147 |
if be_new[constants.BE_AUTO_BALANCE]: |
3148 | 3148 |
# either we changed auto_balance to yes or it was from before |
3149 |
mem_check_list.extend(self.instance.secondary_nodes) |
|
3149 |
mem_check_list.extend( |
|
3150 |
self.cfg.GetInstanceSecondaryNodes(self.instance)) |
|
3150 | 3151 |
instance_info = self.rpc.call_instance_info( |
3151 | 3152 |
pnode_uuid, self.instance.name, self.instance.hypervisor, |
3152 | 3153 |
cluster_hvparams) |
... | ... | |
3188 | 3189 |
miss_mem, errors.ECODE_NORES) |
3189 | 3190 |
|
3190 | 3191 |
if be_new[constants.BE_AUTO_BALANCE]: |
3192 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance) |
|
3191 | 3193 |
for node_uuid, nres in nodeinfo.items(): |
3192 |
if node_uuid not in self.instance.secondary_nodes:
|
|
3194 |
if node_uuid not in secondary_nodes: |
|
3193 | 3195 |
continue |
3194 | 3196 |
nres.Raise("Can't get info from secondary node %s" % |
3195 | 3197 |
self.cfg.GetNodeName(node_uuid), prereq=True, |
... | ... | |
3396 | 3398 |
"""Converts an instance from drbd to plain. |
3397 | 3399 |
|
3398 | 3400 |
""" |
3399 |
assert len(self.instance.secondary_nodes) == 1 |
|
3401 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance) |
|
3402 |
assert len(secondary_nodes) == 1 |
|
3400 | 3403 |
assert self.instance.disk_template == constants.DT_DRBD8 |
3401 | 3404 |
|
3402 | 3405 |
pnode_uuid = self.instance.primary_node |
3403 |
snode_uuid = self.instance.secondary_nodes[0]
|
|
3406 |
snode_uuid = secondary_nodes[0] |
|
3404 | 3407 |
feedback_fn("Converting template to plain") |
3405 | 3408 |
|
3406 | 3409 |
old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg) |
... | ... | |
3471 | 3474 |
else: |
3472 | 3475 |
file_driver = file_path = None |
3473 | 3476 |
|
3477 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance) |
|
3474 | 3478 |
disk = \ |
3475 | 3479 |
GenerateDiskTemplate(self, self.instance.disk_template, |
3476 | 3480 |
self.instance.uuid, self.instance.primary_node, |
3477 |
self.instance.secondary_nodes, [params], file_path,
|
|
3481 |
secondary_nodes, [params], file_path, |
|
3478 | 3482 |
file_driver, idx, self.Log, self.diskparams)[0] |
3479 | 3483 |
|
3480 | 3484 |
new_disks = CreateDisks(self, self.instance, disks=[disk]) |
b/lib/cmdlib/instance_migration.py | ||
---|---|---|
146 | 146 |
} |
147 | 147 |
|
148 | 148 |
if instance.disk_template in constants.DTS_INT_MIRROR: |
149 |
env["OLD_SECONDARY"] = self.cfg.GetNodeName(instance.secondary_nodes[0]) |
|
149 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance) |
|
150 |
env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0]) |
|
150 | 151 |
env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid) |
151 | 152 |
else: |
152 | 153 |
env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = "" |
... | ... | |
160 | 161 |
|
161 | 162 |
""" |
162 | 163 |
instance = self._migrater.instance |
163 |
nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes) |
|
164 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance) |
|
165 |
nl = [self.cfg.GetMasterNode()] + list(secondary_nodes) |
|
164 | 166 |
nl.append(self._migrater.target_node_uuid) |
165 | 167 |
return (nl, nl + [instance.primary_node]) |
166 | 168 |
|
... | ... | |
211 | 213 |
}) |
212 | 214 |
|
213 | 215 |
if instance.disk_template in constants.DTS_INT_MIRROR: |
214 |
env["OLD_SECONDARY"] = self.cfg.GetNodeName(instance.secondary_nodes[0]) |
|
216 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance) |
|
217 |
env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0]) |
|
215 | 218 |
env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid) |
216 | 219 |
else: |
217 | 220 |
env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = "" |
... | ... | |
223 | 226 |
|
224 | 227 |
""" |
225 | 228 |
instance = self._migrater.instance |
226 |
snode_uuids = list(instance.secondary_nodes) |
|
229 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance) |
|
230 |
snode_uuids = list(secondary_nodes) |
|
227 | 231 |
nl = [self.cfg.GetMasterNode(), instance.primary_node] + snode_uuids |
228 | 232 |
nl.append(self._migrater.target_node_uuid) |
229 | 233 |
return (nl, nl) |
... | ... | |
349 | 353 |
else: |
350 | 354 |
assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC) |
351 | 355 |
|
352 |
secondary_node_uuids = self.instance.secondary_nodes
|
|
356 |
secondary_node_uuids = self.cfg.GetInstanceSecondaryNodes(self.instance)
|
|
353 | 357 |
if not secondary_node_uuids: |
354 | 358 |
raise errors.ConfigurationError("No secondary node but using" |
355 | 359 |
" %s disk template" % |
... | ... | |
927 | 931 |
|
928 | 932 |
# FIXME: if we implement migrate-to-any in DRBD, this needs fixing |
929 | 933 |
if self.instance.disk_template in constants.DTS_INT_MIRROR: |
930 |
self.target_node_uuid = self.instance.secondary_nodes[0] |
|
934 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance) |
|
935 |
self.target_node_uuid = secondary_nodes[0] |
|
931 | 936 |
# Otherwise self.target_node has been populated either |
932 | 937 |
# directly, or through an iallocator. |
933 | 938 |
|
b/lib/cmdlib/instance_query.py | ||
---|---|---|
244 | 244 |
node_uuid2name_fn), |
245 | 245 |
instance.disks) |
246 | 246 |
|
247 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance) |
|
247 | 248 |
snodes_group_uuids = [nodes[snode_uuid].group |
248 |
for snode_uuid in instance.secondary_nodes]
|
|
249 |
for snode_uuid in secondary_nodes] |
|
249 | 250 |
|
250 | 251 |
result[instance.name] = { |
251 | 252 |
"name": instance.name, |
... | ... | |
254 | 255 |
"pnode": pnode.name, |
255 | 256 |
"pnode_group_uuid": pnode.group, |
256 | 257 |
"pnode_group_name": group2name_fn(pnode.group), |
257 |
"snodes": map(node_uuid2name_fn, instance.secondary_nodes),
|
|
258 |
"snodes": map(node_uuid2name_fn, secondary_nodes), |
|
258 | 259 |
"snodes_group_uuids": snodes_group_uuids, |
259 | 260 |
"snodes_group_names": map(group2name_fn, snodes_group_uuids), |
260 | 261 |
"os": instance.os, |
b/lib/cmdlib/instance_storage.py | ||
---|---|---|
1303 | 1303 |
instance.name, False, idx) |
1304 | 1304 |
msg = result.fail_msg |
1305 | 1305 |
if msg: |
1306 |
is_offline_secondary = (node_uuid in instance.secondary_nodes and |
|
1306 |
secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance) |
|
1307 |
is_offline_secondary = (node_uuid in secondary_nodes and |
|
1307 | 1308 |
result.offline) |
1308 | 1309 |
lu.LogWarning("Could not prepare block device %s on node %s" |
1309 | 1310 |
" (is_primary=False, pass=1): %s", |
... | ... | |
1661 | 1662 |
|
1662 | 1663 |
""" |
1663 | 1664 |
instance = self.replacer.instance |
1665 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance) |
|
1664 | 1666 |
env = { |
1665 | 1667 |
"MODE": self.op.mode, |
1666 | 1668 |
"NEW_SECONDARY": self.op.remote_node, |
1667 |
"OLD_SECONDARY": self.cfg.GetNodeName(instance.secondary_nodes[0]),
|
|
1669 |
"OLD_SECONDARY": self.cfg.GetNodeName(secondary_nodes[0]), |
|
1668 | 1670 |
} |
1669 | 1671 |
env.update(BuildInstanceHookEnvByObject(self, instance)) |
1670 | 1672 |
return env |
... | ... | |
1954 | 1956 |
raise errors.OpPrereqError("Can only run replace disks for DRBD8-based" |
1955 | 1957 |
" instances", errors.ECODE_INVAL) |
1956 | 1958 |
|
1957 |
if len(self.instance.secondary_nodes) != 1: |
|
1959 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance) |
|
1960 |
if len(secondary_nodes) != 1: |
|
1958 | 1961 |
raise errors.OpPrereqError("The instance has a strange layout," |
1959 | 1962 |
" expected one secondary but found %d" % |
1960 |
len(self.instance.secondary_nodes),
|
|
1963 |
len(secondary_nodes), |
|
1961 | 1964 |
errors.ECODE_FAULT) |
1962 | 1965 |
|
1963 |
secondary_node_uuid = self.instance.secondary_nodes[0]
|
|
1966 |
secondary_node_uuid = secondary_nodes[0] |
|
1964 | 1967 |
|
1965 | 1968 |
if self.iallocator_name is None: |
1966 | 1969 |
remote_node_uuid = self.remote_node_uuid |
1967 | 1970 |
else: |
1968 | 1971 |
remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name, |
1969 | 1972 |
self.instance.uuid, |
1970 |
self.instance.secondary_nodes)
|
|
1973 |
secondary_nodes) |
|
1971 | 1974 |
|
1972 | 1975 |
if remote_node_uuid is None: |
1973 | 1976 |
self.remote_node_info = None |
... | ... | |
2126 | 2129 |
(utils.CommaJoin(self.disks), self.instance.name)) |
2127 | 2130 |
feedback_fn("Current primary node: %s" % |
2128 | 2131 |
self.cfg.GetNodeName(self.instance.primary_node)) |
2132 |
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance) |
|
2129 | 2133 |
feedback_fn("Current secondary node: %s" % |
2130 |
utils.CommaJoin(self.cfg.GetNodeNames( |
|
2131 |
self.instance.secondary_nodes))) |
|
2134 |
utils.CommaJoin(self.cfg.GetNodeNames(secondary_nodes))) |
|
2132 | 2135 |
|
2133 | 2136 |
activate_disks = not self.instance.disks_active |
2134 | 2137 |
|
b/lib/cmdlib/instance_utils.py | ||
---|---|---|
165 | 165 |
cluster = lu.cfg.GetClusterInfo() |
166 | 166 |
bep = cluster.FillBE(instance) |
167 | 167 |
hvp = cluster.FillHV(instance) |
168 |
secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance) |
|
168 | 169 |
args = { |
169 | 170 |
"name": instance.name, |
170 | 171 |
"primary_node_name": lu.cfg.GetNodeName(instance.primary_node), |
171 |
"secondary_node_names": lu.cfg.GetNodeNames(instance.secondary_nodes),
|
|
172 |
"secondary_node_names": lu.cfg.GetNodeNames(secondary_nodes), |
|
172 | 173 |
"os_type": instance.os, |
173 | 174 |
"status": instance.admin_state, |
174 | 175 |
"maxmem": bep[constants.BE_MAXMEM], |
b/lib/cmdlib/node.py | ||
---|---|---|
861 | 861 |
|
862 | 862 |
""" |
863 | 863 |
return _GetNodeInstancesInner(cfg, |
864 |
lambda inst: node_uuid in inst.secondary_nodes) |
|
864 |
lambda inst: node_uuid in |
|
865 |
cfg.GetInstanceSecondaryNodes(inst)) |
|
865 | 866 |
|
866 | 867 |
|
867 | 868 |
def _GetNodeInstances(cfg, node_uuid): |
b/lib/cmdlib/test.py | ||
---|---|---|
263 | 263 |
(self.inst_uuid, self.op.name) = ExpandInstanceUuidAndName(self.cfg, None, |
264 | 264 |
self.op.name) |
265 | 265 |
self.relocate_from_node_uuids = \ |
266 |
list(self.cfg.GetInstanceInfo(self.inst_uuid).secondary_nodes) |
|
266 |
list(self.cfg.GetInstanceSecondaryNodes( |
|
267 |
self.cfg.GetInstanceInfo(self.inst_uuid))) |
|
267 | 268 |
elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP, |
268 | 269 |
constants.IALLOCATOR_MODE_NODE_EVAC): |
269 | 270 |
if not self.op.instances: |
b/lib/config.py | ||
---|---|---|
811 | 811 |
if instance.primary_node not in data.nodes: |
812 | 812 |
result.append("instance '%s' has invalid primary node '%s'" % |
813 | 813 |
(instance.name, instance.primary_node)) |
814 |
for snode in instance.secondary_nodes:
|
|
814 |
for snode in self._UnlockedGetInstanceSecondaryNodes(instance):
|
|
815 | 815 |
if snode not in data.nodes: |
816 | 816 |
result.append("instance '%s' has invalid secondary node '%s'" % |
817 | 817 |
(instance.name, snode)) |
... | ... | |
2040 | 2040 |
for inst in self._config_data.instances.values(): |
2041 | 2041 |
if inst.primary_node == node_uuid: |
2042 | 2042 |
pri.append(inst.uuid) |
2043 |
if node_uuid in inst.secondary_nodes:
|
|
2043 |
if node_uuid in self._UnlockedGetInstanceSecondaryNodes(inst):
|
|
2044 | 2044 |
sec.append(inst.uuid) |
2045 | 2045 |
return (pri, sec) |
2046 | 2046 |
|
b/lib/masterd/iallocator.py | ||
---|---|---|
255 | 255 |
raise errors.OpPrereqError("Can't relocate non-mirrored instances", |
256 | 256 |
errors.ECODE_INVAL) |
257 | 257 |
|
258 |
secondary_nodes = cfg.GetInstanceSecondaryNodes(instance) |
|
258 | 259 |
if (instance.disk_template in constants.DTS_INT_MIRROR and |
259 |
len(instance.secondary_nodes) != 1):
|
|
260 |
len(secondary_nodes) != 1): |
|
260 | 261 |
raise errors.OpPrereqError("Instance has not exactly one secondary node", |
261 | 262 |
errors.ECODE_STATE) |
262 | 263 |
|
... | ... | |
739 | 740 |
"spindle_use": beinfo[constants.BE_SPINDLE_USE], |
740 | 741 |
"os": iinfo.os, |
741 | 742 |
"nodes": [cfg.GetNodeName(iinfo.primary_node)] + |
742 |
cfg.GetNodeNames(iinfo.secondary_nodes), |
|
743 |
cfg.GetNodeNames( |
|
744 |
cfg.GetInstanceSecondaryNodes(iinfo)), |
|
743 | 745 |
"nics": nic_data, |
744 | 746 |
"disks": [{constants.IDISK_SIZE: dsk.size, |
745 | 747 |
constants.IDISK_MODE: dsk.mode, |
b/lib/objects.py | ||
---|---|---|
1092 | 1092 |
__slots__ = [ |
1093 | 1093 |
"name", |
1094 | 1094 |
"primary_node", |
1095 |
"secondary_nodes", |
|
1095 | 1096 |
"os", |
1096 | 1097 |
"hypervisor", |
1097 | 1098 |
"hvparams", |
... | ... | |
1107 | 1108 |
"serial_no", |
1108 | 1109 |
] + _TIMESTAMPS + _UUID |
1109 | 1110 |
|
1110 |
def _ComputeSecondaryNodes(self): |
|
1111 |
"""Compute the list of secondary nodes. |
|
1112 |
|
|
1113 |
This is a simple wrapper over _ComputeAllNodes. |
|
1114 |
|
|
1115 |
""" |
|
1116 |
all_nodes = set(self._ComputeAllNodes()) |
|
1117 |
all_nodes.discard(self.primary_node) |
|
1118 |
return tuple(all_nodes) |
|
1119 |
|
|
1120 |
secondary_nodes = property(_ComputeSecondaryNodes, None, None, |
|
1121 |
"List of names of secondary nodes") |
|
1122 |
|
|
1123 | 1111 |
def _ComputeAllNodes(self): |
1124 | 1112 |
"""Compute the list of all nodes. |
1125 | 1113 |
|
b/lib/rpc/node.py | ||
---|---|---|
877 | 877 |
idict = instance.ToDict() |
878 | 878 |
cluster = self._cfg.GetClusterInfo() |
879 | 879 |
idict["hvparams"] = cluster.FillHV(instance) |
880 |
idict["secondary_nodes"] = self._cfg.GetInstanceSecondaryNodes(instance) |
|
880 | 881 |
if hvp is not None: |
881 | 882 |
idict["hvparams"].update(hvp) |
882 | 883 |
idict["beparams"] = cluster.FillBE(instance) |
b/test/py/ganeti.query_unittest.py | ||
---|---|---|
697 | 697 |
admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_PVM, |
698 | 698 |
os="linux1", |
699 | 699 |
primary_node="node1-uuid", |
700 |
secondary_nodes=[], |
|
700 | 701 |
disk_template=constants.DT_PLAIN, |
701 | 702 |
disks=[], |
702 | 703 |
disks_active=True, |
... | ... | |
707 | 708 |
admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM, |
708 | 709 |
os="deb99", |
709 | 710 |
primary_node="node5-uuid", |
711 |
secondary_nodes=[], |
|
710 | 712 |
disk_template=constants.DT_DISKLESS, |
711 | 713 |
disks=[], |
712 | 714 |
disks_active=True, |
... | ... | |
721 | 723 |
admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_KVM, |
722 | 724 |
os="busybox", |
723 | 725 |
primary_node="node6-uuid", |
726 |
secondary_nodes=[], |
|
724 | 727 |
disk_template=constants.DT_DRBD8, |
725 | 728 |
disks=[], |
726 | 729 |
disks_active=False, |
... | ... | |
738 | 741 |
admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_PVM, |
739 | 742 |
os="linux1", |
740 | 743 |
primary_node="nodeoff2-uuid", |
744 |
secondary_nodes=[], |
|
741 | 745 |
disk_template=constants.DT_DRBD8, |
742 | 746 |
disks=[], |
743 | 747 |
disks_active=True, |
... | ... | |
764 | 768 |
admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM, |
765 | 769 |
os="deb99", |
766 | 770 |
primary_node="nodebad2-uuid", |
771 |
secondary_nodes=[], |
|
767 | 772 |
disk_template=constants.DT_DISKLESS, |
768 | 773 |
disks=[], |
769 | 774 |
disks_active=True, |
... | ... | |
778 | 783 |
admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM, |
779 | 784 |
os="deb99", |
780 | 785 |
primary_node="node7-uuid", |
786 |
secondary_nodes=[], |
|
781 | 787 |
disk_template=constants.DT_DISKLESS, |
782 | 788 |
disks=[], |
783 | 789 |
disks_active=False, |
... | ... | |
794 | 800 |
admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM, |
795 | 801 |
os="deb99", |
796 | 802 |
primary_node="node6-uuid", |
803 |
secondary_nodes=[], |
|
797 | 804 |
disk_template=constants.DT_DISKLESS, |
798 | 805 |
disks=[], |
799 | 806 |
disks_active=False, |
... | ... | |
805 | 812 |
admin_state=constants.ADMINST_OFFLINE, hypervisor=constants.HT_XEN_HVM, |
806 | 813 |
os="deb99", |
807 | 814 |
primary_node="node6-uuid", |
815 |
secondary_nodes=[], |
|
808 | 816 |
disk_template=constants.DT_DISKLESS, |
809 | 817 |
disks=[], |
810 | 818 |
disks_active=False, |
... | ... | |
816 | 824 |
admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM, |
817 | 825 |
os="deb99", |
818 | 826 |
primary_node="node6-uuid", |
827 |
secondary_nodes=[], |
|
819 | 828 |
disk_template=constants.DT_DISKLESS, |
820 | 829 |
disks=[], |
821 | 830 |
disks_active=False, |
b/test/py/ganeti.rpc_unittest.py | ||
---|---|---|
729 | 729 |
def GetInstanceDiskParams(self, _): |
730 | 730 |
return constants.DISK_DT_DEFAULTS |
731 | 731 |
|
732 |
def GetInstanceSecondaryNodes(self, _): |
|
733 |
return [] |
|
734 |
|
|
732 | 735 |
|
733 | 736 |
class TestRpcRunner(unittest.TestCase): |
734 | 737 |
def testUploadFile(self): |
Also available in: Unified diff