Revision 1c3231aa lib/cmdlib/instance.py
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -49,10 +49,10 @@
   LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
   IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
   AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
-  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
+  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
 from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
-  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
+  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
   CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
   StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
   CheckSpindlesExclusiveStorage
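The import changes above summarize the whole revision: name-based helpers (ExpandNodeName, IsExclusiveStorageEnabledNodeName) give way to UUID-based variants. The new call sites throughout the diff imply a helper that takes whichever half of a (UUID, name) pair is known and returns both. A minimal sketch of that contract, assuming only the config lookups that appear elsewhere in this diff (GetNodeInfoByName, GetNodeName); the helper body itself is hypothetical, not Ganeti's actual implementation:

def expand_node_uuid_and_name(cfg, node_uuid, node_name):
  """Return a (uuid, name) pair, resolving whichever half is missing."""
  if node_uuid is None:
    # Only the name is known: look the node up by name to learn its UUID.
    node = cfg.GetNodeInfoByName(node_name)
    if node is None:
      raise ValueError("Unknown node name %s" % node_name)
    return (node.uuid, node.name)
  # The UUID is authoritative; re-derive the display name from it.
  return (node_uuid, cfg.GetNodeName(node_uuid))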
@@ -106,14 +106,14 @@
                                errors.ECODE_INVAL)
 
 
-def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
+def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
   """Wrapper around IAReqInstanceAlloc.
 
   @param op: The instance opcode
   @param disks: The computed disks
   @param nics: The computed nics
   @param beparams: The full filled beparams
-  @param node_whitelist: List of nodes which should appear as online to the
+  @param node_name_whitelist: List of nodes which should appear as online to the
     allocator (unless the node is already marked offline)
 
   @returns: A filled L{iallocator.IAReqInstanceAlloc}
@@ -130,7 +130,7 @@
                                        disks=disks,
                                        nics=[n.ToDict() for n in nics],
                                        hypervisor=op.hypervisor,
-                                       node_whitelist=node_whitelist)
+                                       node_whitelist=node_name_whitelist)
 
 
 def _ComputeFullBeParams(op, cluster):
@@ -245,16 +245,16 @@
   return nics
 
 
-def _CheckForConflictingIp(lu, ip, node):
+def _CheckForConflictingIp(lu, ip, node_uuid):
   """In case of conflicting IP address raise error.
 
   @type ip: string
   @param ip: IP address
-  @type node: string
-  @param node: node name
+  @type node_uuid: string
+  @param node_uuid: node UUID
 
   """
-  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
+  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
   if conf_net is not None:
     raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                 " network %s, but the target NIC does not." %
self.opportunistic_locks[locking.LEVEL_NODE] = True |
522 | 522 |
self.opportunistic_locks[locking.LEVEL_NODE_RES] = True |
523 | 523 |
else: |
524 |
self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode) |
|
525 |
nodelist = [self.op.pnode] |
|
524 |
(self.op.pnode_uuid, self.op.pnode) = \ |
|
525 |
ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode) |
|
526 |
nodelist = [self.op.pnode_uuid] |
|
526 | 527 |
if self.op.snode is not None: |
527 |
self.op.snode = ExpandNodeName(self.cfg, self.op.snode) |
|
528 |
nodelist.append(self.op.snode) |
|
528 |
(self.op.snode_uuid, self.op.snode) = \ |
|
529 |
ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode) |
|
530 |
nodelist.append(self.op.snode_uuid) |
|
529 | 531 |
self.needed_locks[locking.LEVEL_NODE] = nodelist |
530 | 532 |
|
531 | 533 |
# in case of import lock the source node too |
... | ... | |
545 | 547 |
" requires a source node option", |
546 | 548 |
errors.ECODE_INVAL) |
547 | 549 |
else: |
548 |
self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node) |
|
550 |
(self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \ |
|
551 |
ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node) |
|
549 | 552 |
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: |
550 |
self.needed_locks[locking.LEVEL_NODE].append(src_node)
|
|
553 |
self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
|
|
551 | 554 |
if not os.path.isabs(src_path): |
552 | 555 |
self.op.src_path = src_path = \ |
553 | 556 |
utils.PathJoin(pathutils.EXPORT_DIR, src_path) |
... | ... | |
569 | 572 |
# in a nodegroup that has the desired network connected to |
570 | 573 |
req = _CreateInstanceAllocRequest(self.op, self.disks, |
571 | 574 |
self.nics, self.be_full, |
572 |
node_whitelist)
|
|
575 |
self.cfg.GetNodeNames(node_whitelist))
|
|
573 | 576 |
ial = iallocator.IAllocator(self.cfg, self.rpc, req) |
574 | 577 |
|
575 | 578 |
ial.Run(self.op.iallocator) |
... | ... | |
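Note the boundary this hunk establishes: the iallocator interface keeps speaking node names, so the UUID-based whitelist is converted with cfg.GetNodeNames(...) on the way in, and (as the next hunks show) the allocator's name results are converted back to (UUID, name) pairs on the way out. A hedged sketch of that round trip, reusing the hypothetical helper from the note above:

# UUIDs inside the logical unit, names on the iallocator wire (illustrative).
name_whitelist = cfg.GetNodeNames(node_uuid_whitelist)   # UUIDs -> names
ial_result = ["node1.example.com"]                       # allocator output
(pnode_uuid, pnode_name) = \
  expand_node_uuid_and_name(cfg, None, ial_result[0])    # names -> UUIDs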
@@ -586,7 +589,8 @@
                                  (self.op.iallocator, ial.info),
                                  ecode)
 
-    self.op.pnode = ial.result[0]
+    (self.op.pnode_uuid, self.op.pnode) = \
+      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
     self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                  self.op.instance_name, self.op.iallocator,
                  utils.CommaJoin(ial.result))
@@ -594,7 +598,8 @@
     assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
 
     if req.RequiredNodes() == 2:
-      self.op.snode = ial.result[1]
+      (self.op.snode_uuid, self.op.snode) = \
+        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -612,8 +617,8 @@
 
     env.update(BuildInstanceHookEnv(
       name=self.op.instance_name,
-      primary_node=self.op.pnode,
-      secondary_nodes=self.secondaries,
+      primary_node_name=self.op.pnode,
+      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
       status=self.op.start,
       os_type=self.op.os_type,
       minmem=self.be_full[constants.BE_MINMEM],
@@ -635,7 +640,7 @@
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
+    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
     return nl, nl
 
   def _ReadExportInfo(self):
@@ -649,10 +654,10 @@
     """
     assert self.op.mode == constants.INSTANCE_IMPORT
 
-    src_node = self.op.src_node
+    src_node_uuid = self.op.src_node_uuid
     src_path = self.op.src_path
 
-    if src_node is None:
+    if src_node_uuid is None:
       locked_nodes = self.owned_locks(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
@@ -661,7 +666,9 @@
           continue
         if src_path in exp_list[node].payload:
           found = True
-          self.op.src_node = src_node = node
+          self.op.src_node = node
+          self.op.src_node_uuid = src_node_uuid = \
+            self.cfg.GetNodeInfoByName(node).uuid
           self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                        src_path)
           break
@@ -669,8 +676,8 @@
       raise errors.OpPrereqError("No export found for relative path %s" %
                                  src_path, errors.ECODE_INVAL)
 
-    CheckNodeOnline(self, src_node)
-    result = self.rpc.call_export_info(src_node, src_path)
+    CheckNodeOnline(self, src_node_uuid)
+    result = self.rpc.call_export_info(src_node_uuid, src_path)
     result.Raise("No export or invalid export found in dir %s" % src_path)
 
     export_info = objects.SerializableConfigParser.Loads(str(result.payload))
@@ -679,7 +686,7 @@
                                  errors.ECODE_ENVIRON)
 
     ei_version = export_info.get(constants.INISECT_EXP, "version")
-    if (int(ei_version) != constants.EXPORT_VERSION):
+    if int(ei_version) != constants.EXPORT_VERSION:
       raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                  (ei_version, constants.EXPORT_VERSION),
                                  errors.ECODE_ENVIRON)
@@ -940,7 +947,8 @@
       self._RunAllocator()
 
     # Release all unneeded node locks
-    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
+    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
+                               self.op.src_node_uuid])
     ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
     ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
     ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
@@ -952,9 +960,9 @@
     #### node related checks
 
     # check primary node
-    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
+    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
     assert self.pnode is not None, \
-      "Cannot retrieve locked node %s" % self.op.pnode
+      "Cannot retrieve locked node %s" % self.op.pnode_uuid
     if pnode.offline:
       raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                  pnode.name, errors.ECODE_STATE)
@@ -973,7 +981,7 @@
       net_uuid = nic.network
       if net_uuid is not None:
         nobj = self.cfg.GetNetwork(net_uuid)
-        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
+        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
         if netparams is None:
           raise errors.OpPrereqError("No netparams found for network"
                                      " %s. Propably not connected to"
@@ -1003,19 +1011,19 @@
 
       # net is None, ip None or given
       elif self.op.conflicts_check:
-        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
 
     # mirror node verification
     if self.op.disk_template in constants.DTS_INT_MIRROR:
-      if self.op.snode == pnode.name:
+      if self.op.snode_uuid == pnode.uuid:
         raise errors.OpPrereqError("The secondary node cannot be the"
                                    " primary node", errors.ECODE_INVAL)
-      CheckNodeOnline(self, self.op.snode)
-      CheckNodeNotDrained(self, self.op.snode)
-      CheckNodeVmCapable(self, self.op.snode)
-      self.secondaries.append(self.op.snode)
+      CheckNodeOnline(self, self.op.snode_uuid)
+      CheckNodeNotDrained(self, self.op.snode_uuid)
+      CheckNodeVmCapable(self, self.op.snode_uuid)
+      self.secondaries.append(self.op.snode_uuid)
 
-      snode = self.cfg.GetNodeInfo(self.op.snode)
+      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
       if pnode.group != snode.group:
         self.LogWarning("The primary and secondary nodes are in two"
                         " different node groups; the disk parameters"
@@ -1034,7 +1042,7 @@
       for disk in self.disks:
         CheckSpindlesExclusiveStorage(disk, excl_stor, True)
 
-    nodenames = [pnode.name] + self.secondaries
+    node_uuids = [pnode.uuid] + self.secondaries
 
     if not self.adopt_disks:
       if self.op.disk_template == constants.DT_RBD:
@@ -1048,7 +1056,7 @@
       elif self.op.disk_template in utils.GetLvmDiskTemplates():
         # Check lv size requirements, if not adopting
         req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
-        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
+        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
       else:
         # FIXME: add checks for other, non-adopting, non-lvm disk templates
         pass
@@ -1069,11 +1077,11 @@
           raise errors.OpPrereqError("LV named %s used by another instance" %
                                      lv_name, errors.ECODE_NOTUNIQUE)
 
-      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
+      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
       vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
 
-      node_lvs = self.rpc.call_lv_list([pnode.name],
-                                       vg_names.payload.keys())[pnode.name]
+      node_lvs = self.rpc.call_lv_list([pnode.uuid],
+                                       vg_names.payload.keys())[pnode.uuid]
       node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
       node_lvs = node_lvs.payload
 
@@ -1109,8 +1117,8 @@
                                     constants.ADOPTABLE_BLOCKDEV_ROOT),
                                    errors.ECODE_INVAL)
 
-      node_disks = self.rpc.call_bdev_sizes([pnode.name],
-                                            list(all_disks))[pnode.name]
+      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
+                                            list(all_disks))[pnode.uuid]
       node_disks.Raise("Cannot get block device information from node %s" %
                        pnode.name)
       node_disks = node_disks.payload
@@ -1144,13 +1152,13 @@
              (pnode.group, group_info.name, utils.CommaJoin(res)))
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
-    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
+    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
 
-    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
+    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
     # check OS parameters (remotely)
-    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
+    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
 
-    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
+    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
 
     #TODO: _CheckExtParams (remotely)
     # Check parameters for extstorage
@@ -1160,12 +1168,12 @@
     if self.op.start:
       hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                 self.op.hvparams)
-      CheckNodeFreeMemory(self, self.pnode.name,
+      CheckNodeFreeMemory(self, self.pnode.uuid,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MAXMEM],
                           self.op.hypervisor, hvfull)
 
-    self.dry_run_result = list(nodenames)
+    self.dry_run_result = list(node_uuids)
 
   def Exec(self, feedback_fn):
     """Create and add the instance to the cluster.
@@ -1188,11 +1196,10 @@
       # This is ugly but we got a chicken-egg problem here
       # We can only take the group disk parameters, as the instance
       # has no disks yet (we are generating them right here).
-      node = self.cfg.GetNodeInfo(pnode_name)
-      nodegroup = self.cfg.GetNodeGroup(node.group)
+      nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
       disks = GenerateDiskTemplate(self,
                                    self.op.disk_template,
-                                   instance, pnode_name,
+                                   instance, self.pnode.uuid,
                                    self.secondaries,
                                    self.disks,
                                    self.instance_file_storage_dir,
self.cfg.GetGroupDiskParams(nodegroup)) |
1203 | 1210 |
|
1204 | 1211 |
iobj = objects.Instance(name=instance, os=self.op.os_type, |
1205 |
primary_node=pnode_name,
|
|
1212 |
primary_node=self.pnode.uuid,
|
|
1206 | 1213 |
nics=self.nics, disks=disks, |
1207 | 1214 |
disk_template=self.op.disk_template, |
1208 | 1215 |
disks_active=False, |
... | ... | |
1227 | 1234 |
for t_dsk, a_dsk in zip(tmp_disks, self.disks): |
1228 | 1235 |
rename_to.append(t_dsk.logical_id) |
1229 | 1236 |
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT]) |
1230 |
self.cfg.SetDiskID(t_dsk, pnode_name)
|
|
1231 |
result = self.rpc.call_blockdev_rename(pnode_name,
|
|
1237 |
self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
|
|
1238 |
result = self.rpc.call_blockdev_rename(self.pnode.uuid,
|
|
1232 | 1239 |
zip(tmp_disks, rename_to)) |
1233 | 1240 |
result.Raise("Failed to rename adoped LVs") |
1234 | 1241 |
else: |
... | ... | |
@@ -1250,7 +1257,7 @@
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       # Release unused nodes
-      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
     else:
       # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)
# preceding code might or might have not done it, depending on |
1297 | 1304 |
# disk template and other options |
1298 | 1305 |
for disk in iobj.disks: |
1299 |
self.cfg.SetDiskID(disk, pnode_name)
|
|
1306 |
self.cfg.SetDiskID(disk, self.pnode.uuid)
|
|
1300 | 1307 |
if self.op.mode == constants.INSTANCE_CREATE: |
1301 | 1308 |
if not self.op.no_install: |
1302 | 1309 |
pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and |
1303 | 1310 |
not self.op.wait_for_sync) |
1304 | 1311 |
if pause_sync: |
1305 | 1312 |
feedback_fn("* pausing disk sync to install instance OS") |
1306 |
result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
|
|
1313 |
result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
|
|
1307 | 1314 |
(iobj.disks, |
1308 | 1315 |
iobj), True) |
1309 | 1316 |
for idx, success in enumerate(result.payload): |
... | ... | |
1314 | 1321 |
feedback_fn("* running the instance OS create scripts...") |
1315 | 1322 |
# FIXME: pass debug option from opcode to backend |
1316 | 1323 |
os_add_result = \ |
1317 |
self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
|
|
1324 |
self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
|
|
1318 | 1325 |
self.op.debug_level) |
1319 | 1326 |
if pause_sync: |
1320 | 1327 |
feedback_fn("* resuming disk sync") |
1321 |
result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
|
|
1328 |
result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
|
|
1322 | 1329 |
(iobj.disks, |
1323 | 1330 |
iobj), False) |
1324 | 1331 |
for idx, success in enumerate(result.payload): |
... | ... | |
1349 | 1356 |
|
1350 | 1357 |
import_result = \ |
1351 | 1358 |
masterd.instance.TransferInstanceData(self, feedback_fn, |
1352 |
self.op.src_node, pnode_name, |
|
1359 |
self.op.src_node_uuid, |
|
1360 |
self.pnode.uuid, |
|
1353 | 1361 |
self.pnode.secondary_ip, |
1354 | 1362 |
iobj, transfers) |
1355 | 1363 |
if not compat.all(import_result): |
... | ... | |
1368 | 1376 |
self.op.source_shutdown_timeout) |
1369 | 1377 |
timeouts = masterd.instance.ImportExportTimeouts(connect_timeout) |
1370 | 1378 |
|
1371 |
assert iobj.primary_node == self.pnode.name
|
|
1379 |
assert iobj.primary_node == self.pnode.uuid
|
|
1372 | 1380 |
disk_results = \ |
1373 | 1381 |
masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode, |
1374 | 1382 |
self.source_x509_ca, |
... | ... | |
1389 | 1397 |
# Run rename script on newly imported instance |
1390 | 1398 |
assert iobj.name == instance |
1391 | 1399 |
feedback_fn("Running rename script for %s" % instance) |
1392 |
result = self.rpc.call_instance_run_rename(pnode_name, iobj,
|
|
1400 |
result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
|
|
1393 | 1401 |
rename_from, |
1394 | 1402 |
self.op.debug_level) |
1395 | 1403 |
result.Warn("Failed to run rename script for %s on node %s" % |
... | ... | |
1402 | 1410 |
self.cfg.Update(iobj, feedback_fn) |
1403 | 1411 |
logging.info("Starting instance %s on node %s", instance, pnode_name) |
1404 | 1412 |
feedback_fn("* starting instance...") |
1405 |
result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
|
|
1413 |
result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
|
|
1406 | 1414 |
False, self.op.reason) |
1407 | 1415 |
result.Raise("Could not start instance") |
1408 | 1416 |
|
... | ... | |
1503 | 1511 |
new_file_storage_dir) |
1504 | 1512 |
result.Raise("Could not rename on node %s directory '%s' to '%s'" |
1505 | 1513 |
" (but the instance has been renamed in Ganeti)" % |
1506 |
(inst.primary_node, old_file_storage_dir,
|
|
1507 |
new_file_storage_dir)) |
|
1514 |
(self.cfg.GetNodeName(inst.primary_node),
|
|
1515 |
old_file_storage_dir, new_file_storage_dir))
|
|
1508 | 1516 |
|
1509 | 1517 |
StartInstanceDisks(self, inst, None) |
1510 | 1518 |
# update info on disks |
1511 | 1519 |
info = GetInstanceInfoText(inst) |
1512 | 1520 |
for (idx, disk) in enumerate(inst.disks): |
1513 |
for node in inst.all_nodes: |
|
1514 |
self.cfg.SetDiskID(disk, node) |
|
1515 |
result = self.rpc.call_blockdev_setinfo(node, disk, info) |
|
1516 |
result.Warn("Error setting info on node %s for disk %s" % (node, idx),
|
|
1517 |
self.LogWarning) |
|
1521 |
for node_uuid in inst.all_nodes:
|
|
1522 |
self.cfg.SetDiskID(disk, node_uuid)
|
|
1523 |
result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
|
|
1524 |
result.Warn("Error setting info on node %s for disk %s" % |
|
1525 |
(self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
|
|
1518 | 1526 |
try: |
1519 | 1527 |
result = self.rpc.call_instance_run_rename(inst.primary_node, inst, |
1520 | 1528 |
old_name, self.op.debug_level) |
1521 | 1529 |
result.Warn("Could not run OS rename script for instance %s on node %s" |
1522 | 1530 |
" (but the instance has been renamed in Ganeti)" % |
1523 |
(inst.name, inst.primary_node), self.LogWarning) |
|
1531 |
(inst.name, self.cfg.GetNodeName(inst.primary_node)), |
|
1532 |
self.LogWarning) |
|
1524 | 1533 |
finally: |
1525 | 1534 |
ShutdownInstanceDisks(self, inst) |
1526 | 1535 |
|
... | ... | |
1583 | 1592 |
""" |
1584 | 1593 |
instance = self.instance |
1585 | 1594 |
logging.info("Shutting down instance %s on node %s", |
1586 |
instance.name, instance.primary_node)
|
|
1595 |
instance.name, self.cfg.GetNodeName(instance.primary_node))
|
|
1587 | 1596 |
|
1588 | 1597 |
result = self.rpc.call_instance_shutdown(instance.primary_node, instance, |
1589 | 1598 |
self.op.shutdown_timeout, |
... | ... | |
1592 | 1601 |
result.Warn("Warning: can't shutdown instance", feedback_fn) |
1593 | 1602 |
else: |
1594 | 1603 |
result.Raise("Could not shutdown instance %s on node %s" % |
1595 |
(instance.name, instance.primary_node))
|
|
1604 |
(instance.name, self.cfg.GetNodeName(instance.primary_node)))
|
|
1596 | 1605 |
|
1597 | 1606 |
assert (self.owned_locks(locking.LEVEL_NODE) == |
1598 | 1607 |
self.owned_locks(locking.LEVEL_NODE_RES)) |
... | ... | |
1613 | 1622 |
|
1614 | 1623 |
def ExpandNames(self): |
1615 | 1624 |
self._ExpandAndLockInstance() |
1616 |
target_node = ExpandNodeName(self.cfg, self.op.target_node) |
|
1617 |
self.op.target_node = target_node |
|
1618 |
self.needed_locks[locking.LEVEL_NODE] = [target_node] |
|
1625 |
(self.op.target_node_uuid, self.op.target_node) = \ |
|
1626 |
ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid, |
|
1627 |
self.op.target_node) |
|
1628 |
self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node] |
|
1619 | 1629 |
self.needed_locks[locking.LEVEL_NODE_RES] = [] |
1620 | 1630 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND |
1621 | 1631 |
|
... | ... | |
@@ -1647,7 +1657,7 @@
     nl = [
       self.cfg.GetMasterNode(),
       self.instance.primary_node,
-      self.op.target_node,
+      self.op.target_node_uuid,
       ]
     return (nl, nl)
 
@@ -1665,15 +1675,14 @@
       raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                  instance.disk_template, errors.ECODE_STATE)
 
-    node = self.cfg.GetNodeInfo(self.op.target_node)
-    assert node is not None, \
+    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
+    assert target_node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node
 
-    self.target_node = target_node = node.name
-
-    if target_node == instance.primary_node:
+    self.target_node_uuid = target_node.uuid
+    if target_node.uuid == instance.primary_node:
       raise errors.OpPrereqError("Instance %s is already on the node %s" %
-                                 (instance.name, target_node),
+                                 (instance.name, target_node.name),
                                  errors.ECODE_STATE)
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
@@ -1683,19 +1692,19 @@
       raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                  " cannot copy" % idx, errors.ECODE_STATE)
 
-    CheckNodeOnline(self, target_node)
-    CheckNodeNotDrained(self, target_node)
-    CheckNodeVmCapable(self, target_node)
+    CheckNodeOnline(self, target_node.uuid)
+    CheckNodeNotDrained(self, target_node.uuid)
+    CheckNodeVmCapable(self, target_node.uuid)
     cluster = self.cfg.GetClusterInfo()
-    group_info = self.cfg.GetNodeGroup(node.group)
+    group_info = self.cfg.GetNodeGroup(target_node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
+    CheckTargetNodeIPolicy(self, ipolicy, instance, target_node, self.cfg,
                            ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
       # check memory requirements on the secondary node
       CheckNodeFreeMemory(
-          self, target_node, "failing over instance %s" %
+          self, target_node.uuid, "failing over instance %s" %
           instance.name, bep[constants.BE_MAXMEM], instance.hypervisor,
           self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
     else:
@@ -1703,7 +1712,7 @@
                       " instance will not be started")
 
     # check bridge existance
-    CheckInstanceBridgesExist(self, instance, node=target_node)
+    CheckInstanceBridgesExist(self, instance, node_uuid=target_node.uuid)
 
   def Exec(self, feedback_fn):
     """Move an instance.
@@ -1714,29 +1723,30 @@
     """
     instance = self.instance
 
-    source_node = instance.primary_node
-    target_node = self.target_node
+    source_node = self.cfg.GetNodeInfo(instance.primary_node)
+    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
 
     self.LogInfo("Shutting down instance %s on source node %s",
-                 instance.name, source_node)
+                 instance.name, source_node.name)
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
 
-    result = self.rpc.call_instance_shutdown(source_node, instance,
+    result = self.rpc.call_instance_shutdown(source_node.uuid, instance,
                                              self.op.shutdown_timeout,
                                              self.op.reason)
     if self.op.ignore_consistency:
       result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                   " anyway. Please make sure node %s is down. Error details" %
-                  (instance.name, source_node, source_node), self.LogWarning)
+                  (instance.name, source_node.name, source_node.name),
+                  self.LogWarning)
     else:
       result.Raise("Could not shutdown instance %s on node %s" %
-                   (instance.name, source_node))
+                   (instance.name, source_node.name))
 
     # create the target disks
     try:
-      CreateDisks(self, instance, target_node=target_node)
+      CreateDisks(self, instance, target_node_uuid=target_node.uuid)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
       self.cfg.ReleaseDRBDMinors(instance.name)
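Worth noting in this hunk: LUInstanceMove now resolves full node objects up front with cfg.GetNodeInfo, so RPC calls can be fed .uuid while log and error messages keep using .name. A minimal illustration of the idiom, with names assumed from the hunk above (illustrative, not verbatim source):

source_node = cfg.GetNodeInfo(instance.primary_node)  # primary_node is a UUID
rpc.call_instance_shutdown(source_node.uuid, instance, timeout, reason)
logging.info("Shutting down %s on %s", instance.name, source_node.name)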
@@ -1748,16 +1758,17 @@
     # activate, get path, copy the data over
     for idx, disk in enumerate(instance.disks):
       self.LogInfo("Copying data for disk %d", idx)
-      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
-                                               instance.name, True, idx)
+      result = self.rpc.call_blockdev_assemble(target_node.uuid,
+                                               (disk, instance), instance.name,
+                                               True, idx)
       if result.fail_msg:
         self.LogWarning("Can't assemble newly created disk %d: %s",
                         idx, result.fail_msg)
         errs.append(result.fail_msg)
         break
       dev_path = result.payload
-      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
-                                             target_node, dev_path,
+      result = self.rpc.call_blockdev_export(source_node.uuid, (disk, instance),
+                                             target_node.name, dev_path,
                                              cluster_name)
       if result.fail_msg:
         self.LogWarning("Can't copy data over for disk %d: %s",
if errs: |
1769 | 1780 |
self.LogWarning("Some disks failed to copy, aborting") |
1770 | 1781 |
try: |
1771 |
RemoveDisks(self, instance, target_node=target_node)
|
|
1782 |
RemoveDisks(self, instance, target_node_uuid=target_node.uuid)
|
|
1772 | 1783 |
finally: |
1773 | 1784 |
self.cfg.ReleaseDRBDMinors(instance.name) |
1774 | 1785 |
raise errors.OpExecError("Errors during disk copy: %s" % |
1775 | 1786 |
(",".join(errs),)) |
1776 | 1787 |
|
1777 |
instance.primary_node = target_node |
|
1788 |
instance.primary_node = target_node.uuid
|
|
1778 | 1789 |
self.cfg.Update(instance, feedback_fn) |
1779 | 1790 |
|
1780 | 1791 |
self.LogInfo("Removing the disks on the original node") |
1781 |
RemoveDisks(self, instance, target_node=source_node)
|
|
1792 |
RemoveDisks(self, instance, target_node_uuid=source_node.uuid)
|
|
1782 | 1793 |
|
1783 | 1794 |
# Only start the instance if it's marked as up |
1784 | 1795 |
if instance.admin_state == constants.ADMINST_UP: |
1785 | 1796 |
self.LogInfo("Starting instance %s on node %s", |
1786 |
instance.name, target_node) |
|
1797 |
instance.name, target_node.name)
|
|
1787 | 1798 |
|
1788 | 1799 |
disks_ok, _ = AssembleInstanceDisks(self, instance, |
1789 | 1800 |
ignore_secondaries=True) |
... | ... | |
@@ -1791,14 +1802,14 @@
         ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Can't activate the instance's disks")
 
-      result = self.rpc.call_instance_start(target_node,
+      result = self.rpc.call_instance_start(target_node.uuid,
                                             (instance, None, None), False,
                                             self.op.reason)
       msg = result.fail_msg
       if msg:
         ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
-                                 (instance.name, target_node, msg))
+                                 (instance.name, target_node.name, msg))
 
 
 class LUInstanceMultiAlloc(NoHooksLU):
@@ -1866,10 +1877,12 @@
     else:
       nodeslist = []
       for inst in self.op.instances:
-        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
+        (inst.pnode_uuid, inst.pnode) = \
+          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
         nodeslist.append(inst.pnode)
         if inst.snode is not None:
-          inst.snode = ExpandNodeName(self.cfg, inst.snode)
+          (inst.snode_uuid, inst.snode) = \
+            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
           nodeslist.append(inst.snode)
 
       self.needed_locks[locking.LEVEL_NODE] = nodeslist
@@ -1887,7 +1900,8 @@
 
     if self.op.opportunistic_locking:
       # Only consider nodes for which a lock is held
-      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
+      node_whitelist = self.cfg.GetNodeNames(
+        list(self.owned_locks(locking.LEVEL_NODE)))
     else:
       node_whitelist = None
 
@@ -1935,13 +1949,14 @@
     (allocatable, failed) = self.ia_result
 
     jobs = []
-    for (name, nodes) in allocatable:
+    for (name, node_names) in allocatable:
       op = op2inst.pop(name)
 
-      if len(nodes) > 1:
-        (op.pnode, op.snode) = nodes
-      else:
-        (op.pnode,) = nodes
+      (op.pnode_uuid, op.pnode) = \
+        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
+      if len(node_names) > 1:
+        (op.snode_uuid, op.snode) = \
+          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
 
       jobs.append([op])
 
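The old code tuple-unpacked the allocator's name list straight into opcode fields, needing a separate branch for the one-node case; the new code indexes the list and resolves each name to a (UUID, name) pair, so the primary is handled uniformly. A sketch of the resulting shape (reusing the hypothetical helper from the first note):

# node_names comes straight from the iallocator (names on the wire).
(op.pnode_uuid, op.pnode) = expand_node_uuid_and_name(cfg, None, node_names[0])
if len(node_names) > 1:  # secondary only present for mirrored templates
  (op.snode_uuid, op.snode) = \
    expand_node_uuid_and_name(cfg, None, node_names[1])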
@@ -1982,7 +1997,7 @@
   return [(op, idx, params, fn()) for (op, idx, params) in mods]
 
 
-def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_specs):
+def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
   """Checks if nodes have enough physical CPUs
 
   This function checks if all given nodes have the needed number of
@@ -1992,8 +2007,8 @@
 
   @type lu: C{LogicalUnit}
   @param lu: a logical unit from which we get configuration data
-  @type nodenames: C{list}
-  @param nodenames: the list of node names to check
+  @type node_uuids: C{list}
+  @param node_uuids: the list of node UUIDs to check
   @type requested: C{int}
   @param requested: the minimum acceptable number of physical CPUs
   @type hypervisor_specs: list of pairs (string, dict of strings)
@@ -2003,20 +2018,21 @@
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_specs, None)
-  for node in nodenames:
-    info = nodeinfo[node]
-    info.Raise("Cannot get current information from node %s" % node,
+  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
+  for node_uuid in node_uuids:
+    info = nodeinfo[node_uuid]
+    node_name = lu.cfg.GetNodeName(node_uuid)
+    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
-                                (node, num_cpus), errors.ECODE_ENVIRON)
+                                (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
-                                "required" % (node, num_cpus, requested),
+                                "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
 
 
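This hunk shows the pattern the rest of the revision follows for reporting: RPC result dictionaries stay keyed by UUID, and a human-readable name is resolved once per node, used only in messages. A compact, purely illustrative restatement of that pattern:

# Sketch: UUIDs index RPC results; names exist only for reporting.
for node_uuid in node_uuids:
  res = results[node_uuid]                    # RPC results keyed by UUID
  node_name = lu.cfg.GetNodeName(node_uuid)   # resolve the display name once
  res.Raise("RPC to node %s failed" % node_name)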
@@ -2336,7 +2352,8 @@
                                  self._VerifyNicModification)
 
     if self.op.pnode:
-      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
+      (self.op.pnode_uuid, self.op.pnode) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
@@ -2359,8 +2376,10 @@
     elif level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
-        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
-        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
+        (self.op.remote_node_uuid, self.op.remote_node) = \
+          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
+                                self.op.remote_node)
+        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
     elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
return (nl, nl) |
2410 | 2429 |
|
2411 | 2430 |
def _PrepareNicModification(self, params, private, old_ip, old_net_uuid, |
2412 |
old_params, cluster, pnode): |
|
2431 |
old_params, cluster, pnode_uuid):
|
|
2413 | 2432 |
|
2414 | 2433 |
update_params_dict = dict([(key, params[key]) |
2415 | 2434 |
for key in constants.NICS_PARAMETERS |
... | ... | |
2428 | 2447 |
old_net_obj = self.cfg.GetNetwork(old_net_uuid) |
2429 | 2448 |
|
2430 | 2449 |
if new_net_uuid: |
2431 |
netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode) |
|
2450 |
netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
|
|
2432 | 2451 |
if not netparams: |
2433 | 2452 |
raise errors.OpPrereqError("No netparams found for the network" |
2434 | 2453 |
" %s, probably not connected" % |
... | ... | |
2445 | 2464 |
new_mode = new_filled_params[constants.NIC_MODE] |
2446 | 2465 |
if new_mode == constants.NIC_MODE_BRIDGED: |
2447 | 2466 |
bridge = new_filled_params[constants.NIC_LINK] |
2448 |
msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg |
|
2467 |
msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
|
|
2449 | 2468 |
if msg: |
2450 |
msg = "Error checking bridges on node '%s': %s" % (pnode, msg) |
|
2469 |
msg = "Error checking bridges on node '%s': %s" % \ |
|
2470 |
(self.cfg.GetNodeName(pnode_uuid), msg) |
|
2451 | 2471 |
if self.op.force: |
2452 | 2472 |
self.warn.append(msg) |
2453 | 2473 |
else: |
... | ... | |
2528 | 2548 |
errors.ECODE_NOTUNIQUE) |
2529 | 2549 |
# new network is None so check if new IP is a conflicting IP |
2530 | 2550 |
elif self.op.conflicts_check: |
2531 |
_CheckForConflictingIp(self, new_ip, pnode) |
|
2551 |
_CheckForConflictingIp(self, new_ip, pnode_uuid)
|
|
2532 | 2552 |
|
2533 | 2553 |
# release old IP if old network is not None |
2534 | 2554 |
if old_ip and old_net_uuid: |
... | ... | |
2552 | 2572 |
"""CheckPrereq checks related to a new disk template.""" |
2553 | 2573 |
# Arguments are passed to avoid configuration lookups |
2554 | 2574 |
instance = self.instance |
2555 |
pnode = instance.primary_node |
|
2575 |
pnode_uuid = instance.primary_node
|
|
2556 | 2576 |
cluster = self.cluster |
2557 | 2577 |
if instance.disk_template == self.op.disk_template: |
2558 | 2578 |
raise errors.OpPrereqError("Instance already has disk template %s" % |
... | ... | |
2567 | 2587 |
CheckInstanceState(self, instance, INSTANCE_DOWN, |
2568 | 2588 |
msg="cannot change disk template") |
2569 | 2589 |
if self.op.disk_template in constants.DTS_INT_MIRROR: |
2570 |
if self.op.remote_node == pnode:
|
|
2590 |
if self.op.remote_node_uuid == pnode_uuid:
|
|
2571 | 2591 |
raise errors.OpPrereqError("Given new secondary node %s is the same" |
2572 | 2592 |
" as the primary node of the instance" % |
2573 | 2593 |
self.op.remote_node, errors.ECODE_STATE) |
2574 |
CheckNodeOnline(self, self.op.remote_node) |
|
2575 |
CheckNodeNotDrained(self, self.op.remote_node) |
|
2594 |
CheckNodeOnline(self, self.op.remote_node_uuid)
|
|
2595 |
CheckNodeNotDrained(self, self.op.remote_node_uuid)
|
|
2576 | 2596 |
# FIXME: here we assume that the old instance type is DT_PLAIN |
2577 | 2597 |
assert instance.disk_template == constants.DT_PLAIN |
2578 | 2598 |
disks = [{constants.IDISK_SIZE: d.size, |
2579 | 2599 |
constants.IDISK_VG: d.logical_id[0]} |
2580 | 2600 |
for d in instance.disks] |
2581 | 2601 |
required = ComputeDiskSizePerVG(self.op.disk_template, disks) |
2582 |
CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required) |
|
2602 |
CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
|
|
2583 | 2603 |
|
2584 |
snode_info = self.cfg.GetNodeInfo(self.op.remote_node) |
|
2604 |
snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
|
|
2585 | 2605 |
snode_group = self.cfg.GetNodeGroup(snode_info.group) |
2586 | 2606 |
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, |
2587 | 2607 |
snode_group) |
... | ... | |
@@ -2617,7 +2637,7 @@
     self.diskparams = self.cfg.GetInstanceDiskParams(instance)
 
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes).values()
       )
 
     # Check disk modifications. This is done here and not in CheckArguments
@@ -2697,25 +2717,26 @@
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
-    pnode = instance.primary_node
+    pnode_uuid = instance.primary_node
 
     self.warn = []
 
-    if (self.op.pnode is not None and self.op.pnode != pnode and
+    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
         not self.op.force):
       # verify that the instance is not up
       instance_info = self.rpc.call_instance_info(
-          pnode, instance.name, instance.hypervisor, instance.hvparams)
+          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
       if instance_info.fail_msg:
         self.warn.append("Can't get instance runtime information: %s" %
                          instance_info.fail_msg)
       elif instance_info.payload:
-        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
+        raise errors.OpPrereqError("Instance is still running on %s" %
+                                   self.cfg.GetNodeName(pnode_uuid),
                                    errors.ECODE_STATE)
 
-    assert pnode in self.owned_locks(locking.LEVEL_NODE)
-    nodelist = list(instance.all_nodes)
-    pnode_info = self.cfg.GetNodeInfo(pnode)
+    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
+    node_uuids = list(instance.all_nodes)
+    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
 
     #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
     assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
|
2753 | 2774 |
# local check |
2754 | 2775 |
hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new) |
2755 |
CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
|
|
2776 |
CheckHVParams(self, node_uuids, instance.hypervisor, hv_new)
|
|
2756 | 2777 |
self.hv_proposed = self.hv_new = hv_new # the new actual values |
2757 | 2778 |
self.hv_inst = i_hvdict # the new dict (without defaults) |
2758 | 2779 |
else: |
... | ... | |
2807 | 2828 |
# osparams processing |
2808 | 2829 |
if self.op.osparams: |
2809 | 2830 |
i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams) |
2810 |
CheckOSParams(self, True, nodelist, instance_os, i_osdict)
|
|
2831 |
CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
|
|
2811 | 2832 |
self.os_inst = i_osdict # the new dict (without defaults) |
2812 | 2833 |
else: |
2813 | 2834 |
self.os_inst = {} |
... | ... | |
2815 | 2836 |
#TODO(dynmem): do the appropriate check involving MINMEM |
2816 | 2837 |
if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and |
2817 | 2838 |
be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]): |
2818 |
mem_check_list = [pnode] |
|
2839 |
mem_check_list = [pnode_uuid]
|
|
2819 | 2840 |
if be_new[constants.BE_AUTO_BALANCE]: |
2820 | 2841 |
# either we changed auto_balance to yes or it was from before |
2821 | 2842 |
mem_check_list.extend(instance.secondary_nodes) |
2822 | 2843 |
instance_info = self.rpc.call_instance_info( |
2823 |
pnode, instance.name, instance.hypervisor, instance.hvparams) |
|
2844 |
pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
|
|
2824 | 2845 |
hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])] |
2825 | 2846 |
nodeinfo = self.rpc.call_node_info(mem_check_list, None, |
2826 | 2847 |
hvspecs, False) |
2827 |
pninfo = nodeinfo[pnode] |
|
2848 |
pninfo = nodeinfo[pnode_uuid]
|
|
2828 | 2849 |
msg = pninfo.fail_msg |
2829 | 2850 |
if msg: |
2830 | 2851 |
# Assume the primary node is unreachable and go ahead |
2831 | 2852 |
self.warn.append("Can't get info from primary node %s: %s" % |
2832 |
(pnode, msg))
|
|
2853 |
(self.cfg.GetNodeName(pnode_uuid), msg))
|
|
2833 | 2854 |
else: |
2834 | 2855 |
(_, _, (pnhvinfo, )) = pninfo.payload |
2835 | 2856 |
if not isinstance(pnhvinfo.get("memory_free", None), int): |
2836 | 2857 |
self.warn.append("Node data from primary node %s doesn't contain" |
2837 |
" free memory information" % pnode) |
|
2858 |
" free memory information" % |
|
2859 |
self.cfg.GetNodeName(pnode_uuid)) |
|
2838 | 2860 |
elif instance_info.fail_msg: |
2839 | 2861 |
self.warn.append("Can't get instance runtime information: %s" % |
2840 | 2862 |
instance_info.fail_msg) |
... | ... | |
2857 | 2879 |
miss_mem, errors.ECODE_NORES) |
2858 | 2880 |
|
2859 | 2881 |
if be_new[constants.BE_AUTO_BALANCE]: |
2860 |
for node, nres in nodeinfo.items(): |
|
2861 |
if node not in instance.secondary_nodes: |
|
2882 |
for node_uuid, nres in nodeinfo.items():
|
|
2883 |
if node_uuid not in instance.secondary_nodes:
|
|
2862 | 2884 |
continue |
2863 |
nres.Raise("Can't get info from secondary node %s" % node, |
|
2864 |
prereq=True, ecode=errors.ECODE_STATE) |
|
2885 |
nres.Raise("Can't get info from secondary node %s" % |
|
2886 |
self.cfg.GetNodeName(node_uuid), prereq=True, |
|
2887 |
ecode=errors.ECODE_STATE) |
|
2865 | 2888 |
(_, _, (nhvinfo, )) = nres.payload |
2866 | 2889 |
if not isinstance(nhvinfo.get("memory_free", None), int): |
2867 | 2890 |
raise errors.OpPrereqError("Secondary node %s didn't return free" |
2868 |
" memory information" % node, |
|
2891 |
" memory information" % |
|
2892 |
self.cfg.GetNodeName(node_uuid), |
|
2869 | 2893 |
errors.ECODE_STATE) |
2870 | 2894 |
#TODO(dynmem): do the appropriate check involving MINMEM |
2871 | 2895 |
elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]: |
2872 | 2896 |
raise errors.OpPrereqError("This change will prevent the instance" |
2873 | 2897 |
" from failover to its secondary node" |
2874 |
" %s, due to not enough memory" % node, |
|
2898 |
" %s, due to not enough memory" % |
|
2899 |
self.cfg.GetNodeName(node_uuid), |
|
2875 | 2900 |
errors.ECODE_STATE) |
2876 | 2901 |
|
2877 | 2902 |
if self.op.runtime_mem: |
2878 | 2903 |
remote_info = self.rpc.call_instance_info( |
2879 | 2904 |
instance.primary_node, instance.name, instance.hypervisor, |
2880 | 2905 |
instance.hvparams) |
2881 |
remote_info.Raise("Error checking node %s" % instance.primary_node) |
|
2906 |
remote_info.Raise("Error checking node %s" % |
|
2907 |
self.cfg.GetNodeName(instance.primary_node)) |
|
2882 | 2908 |
if not remote_info.payload: # not running already |
2883 | 2909 |
raise errors.OpPrereqError("Instance %s is not running" % |
2884 | 2910 |
instance.name, errors.ECODE_STATE) |
... | ... | |
2904 | 2930 |
|
2905 | 2931 |
def _PrepareNicCreate(_, params, private): |
2906 | 2932 |
self._PrepareNicModification(params, private, None, None, |
2907 |
{}, cluster, pnode) |
|
2933 |
{}, cluster, pnode_uuid)
|
|
2908 | 2934 |
return (None, None) |
2909 | 2935 |
|
2910 | 2936 |
def _PrepareNicMod(_, nic, params, private): |
2911 | 2937 |
self._PrepareNicModification(params, private, nic.ip, nic.network, |
2912 |
nic.nicparams, cluster, pnode) |
|
2938 |
nic.nicparams, cluster, pnode_uuid)
|
|
2913 | 2939 |
return None |
2914 | 2940 |
|
2915 | 2941 |
def _PrepareNicRemove(_, params, __): |
... | ... | |
2982 | 3008 |
""" |
2983 | 3009 |
feedback_fn("Converting template to drbd") |
2984 | 3010 |
instance = self.instance |
2985 |
pnode = instance.primary_node |
|
2986 |
snode = self.op.remote_node
|
|
3011 |
pnode_uuid = instance.primary_node
|
|
3012 |
snode_uuid = self.op.remote_node_uuid
|
|
2987 | 3013 |
|
2988 | 3014 |
assert instance.disk_template == constants.DT_PLAIN |
2989 | 3015 |
|
... | ... | |
2993 | 3019 |
constants.IDISK_NAME: d.name} |
2994 | 3020 |
for d in instance.disks] |
2995 | 3021 |
new_disks = GenerateDiskTemplate(self, self.op.disk_template, |
2996 |
instance.name, pnode, [snode],
|
|
3022 |
instance.name, pnode_uuid, [snode_uuid],
|
|
2997 | 3023 |
disk_info, None, None, 0, feedback_fn, |
2998 | 3024 |
self.diskparams) |
2999 | 3025 |
anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks, |
3000 | 3026 |
self.diskparams) |
3001 |
p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
|
|
3002 |
s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
|
|
3027 |
p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
|
|
3028 |
s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
|
|
3003 | 3029 |
info = GetInstanceInfoText(instance) |
3004 | 3030 |
feedback_fn("Creating additional volumes...") |
3005 | 3031 |
# first, create the missing data and meta devices |
3006 | 3032 |
for disk in anno_disks: |
3007 | 3033 |
# unfortunately this is... not too nice |
3008 |
CreateSingleBlockDev(self, pnode, instance, disk.children[1], |
|
3034 |
CreateSingleBlockDev(self, pnode_uuid, instance, disk.children[1],
|
|
3009 | 3035 |
info, True, p_excl_stor) |
3010 | 3036 |
for child in disk.children: |
3011 |
CreateSingleBlockDev(self, snode, instance, child, info, True, |
|
3037 |
CreateSingleBlockDev(self, snode_uuid, instance, child, info, True,
|
|
3012 | 3038 |
s_excl_stor) |
3013 | 3039 |
# at this stage, all new LVs have been created, we can rename the |
3014 | 3040 |
# old ones |
3015 | 3041 |
feedback_fn("Renaming original volumes...") |
3016 | 3042 |
rename_list = [(o, n.children[0].logical_id) |
3017 | 3043 |
for (o, n) in zip(instance.disks, new_disks)] |
3018 |
result = self.rpc.call_blockdev_rename(pnode, rename_list) |
|
3044 |
result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
|
|
3019 | 3045 |
result.Raise("Failed to rename original LVs") |
3020 | 3046 |
|
3021 | 3047 |
feedback_fn("Initializing DRBD devices...") |
3022 | 3048 |
# all child devices are in place, we can now create the DRBD devices |
3023 | 3049 |
try: |
3024 | 3050 |
for disk in anno_disks: |
3025 |
for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]: |
|
3026 |
f_create = node == pnode |
|
3027 |
CreateSingleBlockDev(self, node, instance, disk, info, f_create, |
|
3051 |
for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor), |
|
3052 |
(snode_uuid, s_excl_stor)]: |
|
3053 |
f_create = node_uuid == pnode_uuid |
|
3054 |
CreateSingleBlockDev(self, node_uuid, instance, disk, info, f_create, |
|
3028 | 3055 |
excl_stor) |
3029 | 3056 |
except errors.GenericError, e: |
3030 | 3057 |
feedback_fn("Initializing of DRBD devices failed;" |
3031 | 3058 |
" renaming back original volumes...") |
3032 | 3059 |
for disk in new_disks: |
3033 |
self.cfg.SetDiskID(disk, pnode) |
|
3060 |
self.cfg.SetDiskID(disk, pnode_uuid)
|
|
3034 | 3061 |
rename_back_list = [(n.children[0], o.logical_id) |
3035 | 3062 |
for (n, o) in zip(new_disks, instance.disks)] |
3036 |
result = self.rpc.call_blockdev_rename(pnode, rename_back_list) |
|
3063 |
result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
|
|
3037 | 3064 |
result.Raise("Failed to rename LVs back after error %s" % str(e)) |
3038 | 3065 |
raise |
3039 | 3066 |
|
... | ... | |
3063 | 3090 |
assert len(instance.secondary_nodes) == 1 |
3064 | 3091 |
assert instance.disk_template == constants.DT_DRBD8 |
3065 | 3092 |
|
3066 |
pnode = instance.primary_node |
|
3067 |
snode = instance.secondary_nodes[0] |
|
3093 |
pnode_uuid = instance.primary_node
|
|
3094 |
snode_uuid = instance.secondary_nodes[0]
|
|
3068 | 3095 |
feedback_fn("Converting template to plain") |
3069 | 3096 |
|
3070 | 3097 |
old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg) |
... | ... | |
3093 | 3120 |
|
3094 | 3121 |
feedback_fn("Removing volumes on the secondary node...") |
3095 | 3122 |
for disk in old_disks: |
3096 |
self.cfg.SetDiskID(disk, snode) |
|
3097 |
msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg |
|
3123 |
self.cfg.SetDiskID(disk, snode_uuid)
|
|
3124 |
msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
|
|
3098 | 3125 |
if msg: |
3099 | 3126 |
self.LogWarning("Could not remove block device %s on node %s," |
3100 |
" continuing anyway: %s", disk.iv_name, snode, msg) |
|
3127 |
" continuing anyway: %s", disk.iv_name, |
|
3128 |
self.cfg.GetNodeName(snode_uuid), msg) |
|
3101 | 3129 |
|
3102 | 3130 |
feedback_fn("Removing unneeded volumes on the primary node...") |
3103 | 3131 |
for idx, disk in enumerate(old_disks): |
3104 | 3132 |
meta = disk.children[1] |
3105 |
self.cfg.SetDiskID(meta, pnode) |
|
3106 |
msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg |
|
3133 |
self.cfg.SetDiskID(meta, pnode_uuid)
|
|
3134 |
msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
|
|
3107 | 3135 |
if msg: |
3108 | 3136 |
self.LogWarning("Could not remove metadata for disk %d on node %s," |
3109 |
" continuing anyway: %s", idx, pnode, msg) |
|
3137 |
" continuing anyway: %s", idx, |
|
3138 |
self.cfg.GetNodeName(pnode_uuid), msg) |
|
3110 | 3139 |
|
3111 | 3140 |
def _CreateNewDisk(self, idx, params, _): |
3112 | 3141 |
"""Creates a new disk. |
... | ... | |
3161 | 3190 |
|
3162 | 3191 |
""" |
3163 | 3192 |
(anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg) |
3164 |
for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node): |
|
3165 |
self.cfg.SetDiskID(disk, node) |
|
3166 |
msg = self.rpc.call_blockdev_remove(node, disk).fail_msg |
|
3193 |
for node_uuid, disk in anno_disk.ComputeNodeTree( |
|
3194 |
self.instance.primary_node): |
|
3195 |
self.cfg.SetDiskID(disk, node_uuid) |
|
3196 |
msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg |
|
3167 | 3197 |
if msg: |
3168 | 3198 |
self.LogWarning("Could not remove disk/%d on node '%s': %s," |
3169 |
" continuing anyway", idx, node, msg) |
|
3199 |
" continuing anyway", idx, |
|
3200 |
self.cfg.GetNodeName(node_uuid), msg) |
|
3170 | 3201 |
|
3171 | 3202 |
# if this is a DRBD disk, return its port to the pool |
3172 | 3203 |
if root.dev_type in constants.LDS_DRBD: |
... | ... | |
3240 | 3271 |
instance = self.instance |
3241 | 3272 |
|
3242 | 3273 |
# New primary node |
3243 |
if self.op.pnode: |
|
3244 |
instance.primary_node = self.op.pnode |
|
3274 |
if self.op.pnode_uuid:
|
|
3275 |
instance.primary_node = self.op.pnode_uuid
|
|
3245 | 3276 |
|
3246 | 3277 |
# runtime memory |
3247 | 3278 |
if self.op.runtime_mem: |
... | ... | |
3260 | 3291 |
if self.op.disk_template: |
3261 | 3292 |
if __debug__: |
3262 | 3293 |
check_nodes = set(instance.all_nodes) |
3263 |
if self.op.remote_node: |
|
3264 |
check_nodes.add(self.op.remote_node) |
|
3294 |
if self.op.remote_node_uuid:
|
|
3295 |
check_nodes.add(self.op.remote_node_uuid)
|
|
3265 | 3296 |
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]: |
3266 | 3297 |
owned = self.owned_locks(level) |
3267 | 3298 |
assert not (check_nodes - owned), \ |
... | ... | |
3392 | 3423 |
# Lock all nodes in all potential target groups |
3393 | 3424 |
lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) - |
3394 | 3425 |
self.cfg.GetInstanceNodeGroups(self.op.instance_name)) |
3395 |
member_nodes = [node_name
|
|
3426 |
member_nodes = [node_uuid
|
|
3396 | 3427 |
for group in lock_groups |
3397 |
for node_name in self.cfg.GetNodeGroup(group).members]
|
|
3428 |
for node_uuid in self.cfg.GetNodeGroup(group).members]
|
|
3398 | 3429 |
self.needed_locks[locking.LEVEL_NODE].extend(member_nodes) |
3399 | 3430 |
else: |
3400 | 3431 |
# Lock all nodes as all groups are potential targets |