Revision 5eacbcae
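
This revision drops the leading underscore from the cmdlib helper functions that are shared across modules (in base.py, common.py, instance_storage.py and instance_utils.py) and updates every importer and call site to match; the per-file hunks below are otherwise mechanical. A minimal before/after sketch of the pattern, assembled from the backup.py hunks below (an illustrative excerpt, not a standalone program):

    # before this revision
    from ganeti.cmdlib.common import _GetWantedNodes, _ShareAll, \
      _CheckNodeOnline, _ExpandNodeName
    lu.share_locks = _ShareAll()
    _CheckNodeOnline(self, self.instance.primary_node)

    # after this revision
    from ganeti.cmdlib.common import GetWantedNodes, ShareAll, CheckNodeOnline, \
      ExpandNodeName
    lu.share_locks = ShareAll()
    CheckNodeOnline(self, self.instance.primary_node)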
b/lib/cmdlib/backup.py
---|---|---
33 | 33 | from ganeti import query
34 | 34 | from ganeti import utils
35 | 35 |
36 |  | from ganeti.cmdlib.base import _QueryBase, NoHooksLU, LogicalUnit
37 |  | from ganeti.cmdlib.common import _GetWantedNodes, _ShareAll, \
38 |  | _CheckNodeOnline, _ExpandNodeName
39 |  | from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \
40 |  | _ShutdownInstanceDisks
41 |  | from ganeti.cmdlib.instance_utils import _GetClusterDomainSecret, \
42 |  | _BuildInstanceHookEnvByObject, _CheckNodeNotDrained, _RemoveInstance
 | 36 | from ganeti.cmdlib.base import QueryBase, NoHooksLU, LogicalUnit
 | 37 | from ganeti.cmdlib.common import GetWantedNodes, ShareAll, CheckNodeOnline, \
 | 38 | ExpandNodeName
 | 39 | from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
 | 40 | ShutdownInstanceDisks
 | 41 | from ganeti.cmdlib.instance_utils import GetClusterDomainSecret, \
 | 42 | BuildInstanceHookEnvByObject, CheckNodeNotDrained, RemoveInstance
43 | 43 |
44 | 44 |
45 |  | class _ExportQuery(_QueryBase):
 | 45 | class ExportQuery(QueryBase):
46 | 46 | FIELDS = query.EXPORT_FIELDS
47 | 47 |
48 | 48 | #: The node name is not a unique key for this query
... | ... |
53 | 53 |
54 | 54 | # The following variables interact with _QueryBase._GetNames
55 | 55 | if self.names:
56 |  | self.wanted = _GetWantedNodes(lu, self.names)
 | 56 | self.wanted = GetWantedNodes(lu, self.names)
57 | 57 | else:
58 | 58 | self.wanted = locking.ALL_SET
59 | 59 |
60 | 60 | self.do_locking = self.use_locking
61 | 61 |
62 | 62 | if self.do_locking:
63 |  | lu.share_locks = _ShareAll()
 | 63 | lu.share_locks = ShareAll()
64 | 64 | lu.needed_locks = {
65 | 65 | locking.LEVEL_NODE: self.wanted,
66 | 66 | }
... | ... |
102 | 102 | REQ_BGL = False
103 | 103 |
104 | 104 | def CheckArguments(self):
105 |  | self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
106 |  | ["node", "export"], self.op.use_locking)
 | 105 | self.expq = ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
 | 106 | ["node", "export"], self.op.use_locking)
107 | 107 |
108 | 108 | def ExpandNames(self):
109 | 109 | self.expq.ExpandNames(self)
... | ... |
141 | 141 | self.instance = self.cfg.GetInstanceInfo(instance_name)
142 | 142 | assert self.instance is not None, \
143 | 143 | "Cannot retrieve locked instance %s" % self.op.instance_name
144 |  | _CheckNodeOnline(self, self.instance.primary_node)
 | 144 | CheckNodeOnline(self, self.instance.primary_node)
145 | 145 |
146 |  | self._cds = _GetClusterDomainSecret()
 | 146 | self._cds = GetClusterDomainSecret()
147 | 147 |
148 | 148 | def Exec(self, feedback_fn):
149 | 149 | """Prepares an instance for an export.
... | ... |
237 | 237 | "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
238 | 238 | }
239 | 239 |
240 |  | env.update(_BuildInstanceHookEnvByObject(self, self.instance))
 | 240 | env.update(BuildInstanceHookEnvByObject(self, self.instance))
241 | 241 |
242 | 242 | return env
243 | 243 |
... | ... |
263 | 263 | self.instance = self.cfg.GetInstanceInfo(instance_name)
264 | 264 | assert self.instance is not None, \
265 | 265 | "Cannot retrieve locked instance %s" % self.op.instance_name
266 |  | _CheckNodeOnline(self, self.instance.primary_node)
 | 266 | CheckNodeOnline(self, self.instance.primary_node)
267 | 267 |
268 | 268 | if (self.op.remove_instance and
269 | 269 | self.instance.admin_state == constants.ADMINST_UP and
... | ... |
272 | 272 | " down before", errors.ECODE_STATE)
273 | 273 |
274 | 274 | if self.op.mode == constants.EXPORT_MODE_LOCAL:
275 |  | self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
 | 275 | self.op.target_node = ExpandNodeName(self.cfg, self.op.target_node)
276 | 276 | self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
277 | 277 | assert self.dst_node is not None
278 | 278 |
279 |  | _CheckNodeOnline(self, self.dst_node.name)
280 |  | _CheckNodeNotDrained(self, self.dst_node.name)
 | 279 | CheckNodeOnline(self, self.dst_node.name)
 | 280 | CheckNodeNotDrained(self, self.dst_node.name)
281 | 281 |
282 | 282 | self._cds = None
283 | 283 | self.dest_disk_info = None
... | ... |
293 | 293 | len(self.instance.disks)),
294 | 294 | errors.ECODE_INVAL)
295 | 295 |
296 |  | cds = _GetClusterDomainSecret()
 | 296 | cds = GetClusterDomainSecret()
297 | 297 |
298 | 298 | # Check X509 key name
299 | 299 | try:
... | ... |
403 | 403 | if activate_disks:
404 | 404 | # Activate the instance disks if we'exporting a stopped instance
405 | 405 | feedback_fn("Activating disks for %s" % instance.name)
406 |  | _StartInstanceDisks(self, instance, None)
 | 406 | StartInstanceDisks(self, instance, None)
407 | 407 |
408 | 408 | try:
409 | 409 | helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
... | ... |
422 | 422 | msg = result.fail_msg
423 | 423 | if msg:
424 | 424 | feedback_fn("Failed to start instance: %s" % msg)
425 |  | _ShutdownInstanceDisks(self, instance)
 | 425 | ShutdownInstanceDisks(self, instance)
426 | 426 | raise errors.OpExecError("Could not start instance: %s" % msg)
427 | 427 |
428 | 428 | if self.op.mode == constants.EXPORT_MODE_LOCAL:
... | ... |
451 | 451 | finally:
452 | 452 | if activate_disks:
453 | 453 | feedback_fn("Deactivating disks for %s" % instance.name)
454 |  | _ShutdownInstanceDisks(self, instance)
 | 454 | ShutdownInstanceDisks(self, instance)
455 | 455 |
456 | 456 | if not (compat.all(dresults) and fin_resu):
457 | 457 | failures = []
... | ... |
470 | 470 | # Remove instance if requested
471 | 471 | if self.op.remove_instance:
472 | 472 | feedback_fn("Removing instance %s" % instance.name)
473 |  | _RemoveInstance(self, feedback_fn, instance,
474 |  | self.op.ignore_remove_failures)
 | 473 | RemoveInstance(self, feedback_fn, instance,
 | 474 | self.op.ignore_remove_failures)
475 | 475 |
476 | 476 | if self.op.mode == constants.EXPORT_MODE_LOCAL:
477 | 477 | self._CleanupExports(feedback_fn)
b/lib/cmdlib/base.py
---|---|---
28 | 28 | from ganeti import locking
29 | 29 | from ganeti import query
30 | 30 | from ganeti import utils
31 |  | from ganeti.cmdlib.common import _ExpandInstanceName
 | 31 | from ganeti.cmdlib.common import ExpandInstanceName
32 | 32 |
33 | 33 |
34 | 34 | class ResultWithJobs:
... | ... |
319 | 319 | else:
320 | 320 | assert locking.LEVEL_INSTANCE not in self.needed_locks, \
321 | 321 | "_ExpandAndLockInstance called with instance-level locks set"
322 |  | self.op.instance_name = _ExpandInstanceName(self.cfg,
323 |  | self.op.instance_name)
 | 322 | self.op.instance_name = ExpandInstanceName(self.cfg,
 | 323 | self.op.instance_name)
324 | 324 | self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
325 | 325 |
326 | 326 | def _LockInstancesNodes(self, primary_only=False,
... | ... |
444 | 444 | raise NotImplementedError
445 | 445 |
446 | 446 |
447 |  | class _QueryBase:
 | 447 | class QueryBase:
448 | 448 | """Base for query utility classes.
449 | 449 |
450 | 450 | """
b/lib/cmdlib/cluster.py
---|---|---
49 | 49 | from ganeti import utils
50 | 50 | from ganeti import vcluster
51 | 51 |
52 |  | from ganeti.cmdlib.base import NoHooksLU, _QueryBase, LogicalUnit, \
 | 52 | from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
53 | 53 | ResultWithJobs
54 |  | from ganeti.cmdlib.common import _ShareAll, _RunPostHook, \
55 |  | _ComputeAncillaryFiles, _RedistributeAncillaryFiles, _UploadHelper, \
56 |  | _GetWantedInstances, _MergeAndVerifyHvState, _MergeAndVerifyDiskState, \
57 |  | _GetUpdatedIPolicy, _ComputeNewInstanceViolations, _GetUpdatedParams, \
58 |  | _CheckOSParams, _CheckHVParams, _AdjustCandidatePool, _CheckNodePVs, \
59 |  | _ComputeIPolicyInstanceViolation, _AnnotateDiskParams, \
60 |  | _SupportsOob
 | 54 | from ganeti.cmdlib.common import ShareAll, RunPostHook, \
 | 55 | ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
 | 56 | GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
 | 57 | GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
 | 58 | CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
 | 59 | ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
61 | 60 |
62 | 61 | import ganeti.masterd.instance
63 | 62 |
... | ... |
99 | 98 | REQ_BGL = False
100 | 99 |
101 | 100 | def CheckArguments(self):
102 |  | self.cq = _ClusterQuery(None, self.op.output_fields, False)
 | 101 | self.cq = ClusterQuery(None, self.op.output_fields, False)
103 | 102 |
104 | 103 | def ExpandNames(self):
105 | 104 | self.cq.ExpandNames(self)
... | ... |
164 | 163 | master_params = self.cfg.GetMasterNetworkParameters()
165 | 164 |
166 | 165 | # Run post hooks on master node before it's removed
167 |  | _RunPostHook(self, master_params.name)
 | 166 | RunPostHook(self, master_params.name)
168 | 167 |
169 | 168 | ems = self.cfg.GetUseExternalMipScript()
170 | 169 | result = self.rpc.call_node_deactivate_master_ip(master_params.name,
... | ... |
204 | 203 | return True
205 | 204 |
206 | 205 |
207 |  | class _ClusterQuery(_QueryBase):
 | 206 | class ClusterQuery(QueryBase):
208 | 207 | FIELDS = query.CLUSTER_FIELDS
209 | 208 |
210 | 209 | #: Do not sort (there is only one item)
... | ... |
344 | 343 | locking.LEVEL_NODE: locking.ALL_SET,
345 | 344 | locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
346 | 345 | }
347 |  | self.share_locks = _ShareAll()
 | 346 | self.share_locks = ShareAll()
348 | 347 |
349 | 348 | def Exec(self, feedback_fn):
350 | 349 | """Redistribute the configuration.
351 | 350 |
352 | 351 | """
353 | 352 | self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
354 |  | _RedistributeAncillaryFiles(self)
 | 353 | RedistributeAncillaryFiles(self)
355 | 354 |
356 | 355 |
357 | 356 | class LUClusterRename(LogicalUnit):
... | ... |
426 | 425 | node_list.remove(master_params.name)
427 | 426 | except ValueError:
428 | 427 | pass
429 |  | _UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
 | 428 | UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
430 | 429 | finally:
431 | 430 | master_params.ip = new_ip
432 | 431 | result = self.rpc.call_node_activate_master_ip(master_params.name,
... | ... |
447 | 446 |
448 | 447 | def ExpandNames(self):
449 | 448 | if self.op.instances:
450 |  | self.wanted_names = _GetWantedInstances(self, self.op.instances)
 | 449 | self.wanted_names = GetWantedInstances(self, self.op.instances)
451 | 450 | # Not getting the node allocation lock as only a specific set of
452 | 451 | # instances (and their nodes) is going to be acquired
453 | 452 | self.needed_locks = {
... | ... |
633 | 632 | locking.LEVEL_NODEGROUP: locking.ALL_SET,
634 | 633 | locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
635 | 634 | }
636 |  | self.share_locks = _ShareAll()
 | 635 | self.share_locks = ShareAll()
637 | 636 |
638 | 637 | def BuildHooksEnv(self):
639 | 638 | """Build hooks env.
... | ... |
727 | 726 | constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
728 | 727 |
729 | 728 | if self.op.hv_state:
730 |  | new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
731 |  | self.cluster.hv_state_static)
 | 729 | new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
 | 730 | self.cluster.hv_state_static)
732 | 731 | self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
733 | 732 | for hv, values in new_hv_state.items())
734 | 733 |
735 | 734 | if self.op.disk_state:
736 |  | new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
737 |  | self.cluster.disk_state_static)
 | 735 | new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
 | 736 | self.cluster.disk_state_static)
738 | 737 | self.new_disk_state = \
739 | 738 | dict((storage, dict((name, cluster.SimpleFillDiskState(values))
740 | 739 | for name, values in svalues.items()))
741 | 740 | for storage, svalues in new_disk_state.items())
742 | 741 |
743 | 742 | if self.op.ipolicy:
744 |  | self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
745 |  | group_policy=False)
 | 743 | self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
 | 744 | group_policy=False)
746 | 745 |
747 | 746 | all_instances = self.cfg.GetAllInstancesInfo().values()
748 | 747 | violations = set()
... | ... |
752 | 751 | for node in inst.all_nodes)])
753 | 752 | new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
754 | 753 | ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
755 |  | new = _ComputeNewInstanceViolations(ipol,
756 |  | new_ipolicy, instances, self.cfg)
 | 754 | new = ComputeNewInstanceViolations(ipol,
 | 755 | new_ipolicy, instances, self.cfg)
757 | 756 | if new:
758 | 757 | violations.update(new)
759 | 758 |
... | ... |
831 | 830 | if os_name not in self.new_osp:
832 | 831 | self.new_osp[os_name] = {}
833 | 832 |
834 |  | self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
835 |  | use_none=True)
 | 833 | self.new_osp[os_name] = GetUpdatedParams(self.new_osp[os_name], osp,
 | 834 | use_none=True)
836 | 835 |
837 | 836 | if not self.new_osp[os_name]:
838 | 837 | # we removed all parameters
839 | 838 | del self.new_osp[os_name]
840 | 839 | else:
841 | 840 | # check the parameter validity (remote check)
842 |  | _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
843 |  | os_name, self.new_osp[os_name])
 | 841 | CheckOSParams(self, False, [self.cfg.GetMasterNode()],
 | 842 | os_name, self.new_osp[os_name])
844 | 843 |
845 | 844 | # changes to the hypervisor list
846 | 845 | if self.op.enabled_hypervisors is not None:
... | ... |
868 | 867 | hv_class = hypervisor.GetHypervisorClass(hv_name)
869 | 868 | utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
870 | 869 | hv_class.CheckParameterSyntax(hv_params)
871 |  | _CheckHVParams(self, node_list, hv_name, hv_params)
 | 870 | CheckHVParams(self, node_list, hv_name, hv_params)
872 | 871 |
873 | 872 | self._CheckDiskTemplateConsistency()
874 | 873 |
... | ... |
883 | 882 | new_osp = objects.FillDict(cluster_defaults, hv_params)
884 | 883 | hv_class = hypervisor.GetHypervisorClass(hv_name)
885 | 884 | hv_class.CheckParameterSyntax(new_osp)
886 |  | _CheckHVParams(self, node_list, hv_name, new_osp)
 | 885 | CheckHVParams(self, node_list, hv_name, new_osp)
887 | 886 |
888 | 887 | if self.op.default_iallocator:
889 | 888 | alloc_script = utils.FindFile(self.op.default_iallocator,
... | ... |
963 | 962 | if self.op.candidate_pool_size is not None:
964 | 963 | self.cluster.candidate_pool_size = self.op.candidate_pool_size
965 | 964 | # we need to update the pool size here, otherwise the save will fail
966 |  | _AdjustCandidatePool(self, [])
 | 965 | AdjustCandidatePool(self, [])
967 | 966 |
968 | 967 | if self.op.maintain_node_health is not None:
969 | 968 | if self.op.maintain_node_health and not constants.ENABLE_CONFD:
... | ... |
1242 | 1241 |
1243 | 1242 | def ExpandNames(self):
1244 | 1243 | self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
1245 |  | self.share_locks = _ShareAll()
 | 1244 | self.share_locks = ShareAll()
1246 | 1245 |
1247 | 1246 | def CheckPrereq(self):
1248 | 1247 | """Check prerequisites.
... | ... |
1399 | 1398 | locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1400 | 1399 | }
1401 | 1400 |
1402 |  | self.share_locks = _ShareAll()
 | 1401 | self.share_locks = ShareAll()
1403 | 1402 |
1404 | 1403 | def DeclareLocks(self, level):
1405 | 1404 | if level == locking.LEVEL_NODE:
... | ... |
1607 | 1606 | _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
1608 | 1607 |
1609 | 1608 | # Check PVs
1610 |  | (errmsgs, pvminmax) = _CheckNodePVs(nresult, self._exclusive_storage)
 | 1609 | (errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
1611 | 1610 | for em in errmsgs:
1612 | 1611 | self._Error(constants.CV_ENODELVM, node, em)
1613 | 1612 | if pvminmax is not None:
... | ... |
1748 | 1747 | cluster = self.cfg.GetClusterInfo()
1749 | 1748 | ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
1750 | 1749 | self.group_info)
1751 |  | err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
 | 1750 | err = ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
1752 | 1751 | _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
1753 | 1752 | code=self.ETYPE_WARNING)
1754 | 1753 |
... | ... |
2354 | 2353 | # _AnnotateDiskParams makes already copies of the disks
2355 | 2354 | devonly = []
2356 | 2355 | for (inst, dev) in disks:
2357 |  | (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
 | 2356 | (anno_disk,) = AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
2358 | 2357 | self.cfg.SetDiskID(anno_disk, nname)
2359 | 2358 | devonly.append(anno_disk)
2360 | 2359 |
... | ... |
2505 | 2504 | # FIXME: verify OS list
2506 | 2505 |
2507 | 2506 | # File verification
2508 |  | filemap = _ComputeAncillaryFiles(cluster, False)
 | 2507 | filemap = ComputeAncillaryFiles(cluster, False)
2509 | 2508 |
2510 | 2509 | # do local checksums
2511 | 2510 | master_node = self.master_node = self.cfg.GetMasterNode()
... | ... |
2580 | 2579 | # Gather OOB paths
2581 | 2580 | oob_paths = []
2582 | 2581 | for node in self.all_node_info.values():
2583 |  | path = _SupportsOob(self.cfg, node)
 | 2582 | path = SupportsOob(self.cfg, node)
2584 | 2583 | if path and path not in oob_paths:
2585 | 2584 | oob_paths.append(path)
2586 | 2585 |
... | ... |
2862 | 2861 | REQ_BGL = False
2863 | 2862 |
2864 | 2863 | def ExpandNames(self):
2865 |  | self.share_locks = _ShareAll()
 | 2864 | self.share_locks = ShareAll()
2866 | 2865 | self.needed_locks = {
2867 | 2866 | locking.LEVEL_NODEGROUP: locking.ALL_SET,
2868 | 2867 | }
b/lib/cmdlib/common.py
---|---|---
65 | 65 | return full_name
66 | 66 |
67 | 67 |
68 |  | def _ExpandInstanceName(cfg, name):
 | 68 | def ExpandInstanceName(cfg, name):
69 | 69 | """Wrapper over L{_ExpandItemName} for instance."""
70 | 70 | return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
71 | 71 |
72 | 72 |
73 |  | def _ExpandNodeName(cfg, name):
 | 73 | def ExpandNodeName(cfg, name):
74 | 74 | """Wrapper over L{_ExpandItemName} for nodes."""
75 | 75 | return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
76 | 76 |
77 | 77 |
78 |  | def _ShareAll():
 | 78 | def ShareAll():
79 | 79 | """Returns a dict declaring all lock levels shared.
80 | 80 |
81 | 81 | """
82 | 82 | return dict.fromkeys(locking.LEVELS, 1)
83 | 83 |
84 | 84 |
85 |  | def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
 | 85 | def CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
86 | 86 | """Checks if the instances in a node group are still correct.
87 | 87 |
88 | 88 | @type cfg: L{config.ConfigWriter}
... | ... |
106 | 106 | return wanted_instances
107 | 107 |
108 | 108 |
109 |  | def _GetWantedNodes(lu, nodes):
 | 109 | def GetWantedNodes(lu, nodes):
110 | 110 | """Returns list of checked and expanded node names.
111 | 111 |
112 | 112 | @type lu: L{LogicalUnit}
... | ... |
119 | 119 |
120 | 120 | """
121 | 121 | if nodes:
122 |  | return [_ExpandNodeName(lu.cfg, name) for name in nodes]
 | 122 | return [ExpandNodeName(lu.cfg, name) for name in nodes]
123 | 123 |
124 | 124 | return utils.NiceSort(lu.cfg.GetNodeList())
125 | 125 |
126 | 126 |
127 |  | def _GetWantedInstances(lu, instances):
 | 127 | def GetWantedInstances(lu, instances):
128 | 128 | """Returns list of checked and expanded instance names.
129 | 129 |
130 | 130 | @type lu: L{LogicalUnit}
... | ... |
138 | 138 |
139 | 139 | """
140 | 140 | if instances:
141 |  | wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
 | 141 | wanted = [ExpandInstanceName(lu.cfg, name) for name in instances]
142 | 142 | else:
143 | 143 | wanted = utils.NiceSort(lu.cfg.GetInstanceList())
144 | 144 | return wanted
145 | 145 |
146 | 146 |
147 |  | def _RunPostHook(lu, node_name):
 | 147 | def RunPostHook(lu, node_name):
148 | 148 | """Runs the post-hook for an opcode on a single node.
149 | 149 |
150 | 150 | """
... | ... |
156 | 156 | node_name, err)
157 | 157 |
158 | 158 |
159 |  | def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
 | 159 | def RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
160 | 160 | """Distribute additional files which are part of the cluster configuration.
161 | 161 |
162 | 162 | ConfigWriter takes care of distributing the config and ssconf files, but
... | ... |
189 | 189 |
190 | 190 | # Gather file lists
191 | 191 | (files_all, _, files_mc, files_vm) = \
192 |  | _ComputeAncillaryFiles(cluster, True)
 | 192 | ComputeAncillaryFiles(cluster, True)
193 | 193 |
194 | 194 | # Never re-distribute configuration file from here
195 | 195 | assert not (pathutils.CLUSTER_CONF_FILE in files_all or
... | ... |
204 | 204 | # Upload the files
205 | 205 | for (node_list, files) in filemap:
206 | 206 | for fname in files:
207 |  | _UploadHelper(lu, node_list, fname)
 | 207 | UploadHelper(lu, node_list, fname)
208 | 208 |
209 | 209 |
210 |  | def _ComputeAncillaryFiles(cluster, redist):
 | 210 | def ComputeAncillaryFiles(cluster, redist):
211 | 211 | """Compute files external to Ganeti which need to be consistent.
212 | 212 |
213 | 213 | @type redist: boolean
... | ... |
286 | 286 | return (files_all, files_opt, files_mc, files_vm)
287 | 287 |
288 | 288 |
289 |  | def _UploadHelper(lu, nodes, fname):
 | 289 | def UploadHelper(lu, nodes, fname):
290 | 290 | """Helper for uploading a file and showing warnings.
291 | 291 |
292 | 292 | """
... | ... |
300 | 300 | lu.LogWarning(msg)
301 | 301 |
302 | 302 |
303 |  | def _MergeAndVerifyHvState(op_input, obj_input):
 | 303 | def MergeAndVerifyHvState(op_input, obj_input):
304 | 304 | """Combines the hv state from an opcode with the one of the object
305 | 305 |
306 | 306 | @param op_input: The input dict from the opcode
... | ... |
322 | 322 | return None
323 | 323 |
324 | 324 |
325 |  | def _MergeAndVerifyDiskState(op_input, obj_input):
 | 325 | def MergeAndVerifyDiskState(op_input, obj_input):
326 | 326 | """Combines the disk state from an opcode with the one of the object
327 | 327 |
328 | 328 | @param op_input: The input dict from the opcode
... | ... |
345 | 345 | return None
346 | 346 |
347 | 347 |
348 |  | def _CheckOSParams(lu, required, nodenames, osname, osparams):
 | 348 | def CheckOSParams(lu, required, nodenames, osname, osparams):
349 | 349 | """OS parameters validation.
350 | 350 |
351 | 351 | @type lu: L{LogicalUnit}
... | ... |
375 | 375 | osname, node)
376 | 376 |
377 | 377 |
378 |  | def _CheckHVParams(lu, nodenames, hvname, hvparams):
 | 378 | def CheckHVParams(lu, nodenames, hvname, hvparams):
379 | 379 | """Hypervisor parameter validation.
380 | 380 |
381 | 381 | This function abstract the hypervisor parameter validation to be
... | ... |
405 | 405 | info.Raise("Hypervisor parameter validation failed on node %s" % node)
406 | 406 |
407 | 407 |
408 |  | def _AdjustCandidatePool(lu, exceptions):
 | 408 | def AdjustCandidatePool(lu, exceptions):
409 | 409 | """Adjust the candidate pool after node operations.
410 | 410 |
411 | 411 | """
... | ... |
421 | 421 | (mc_now, mc_max))
422 | 422 |
423 | 423 |
424 |  | def _CheckNodePVs(nresult, exclusive_storage):
 | 424 | def CheckNodePVs(nresult, exclusive_storage):
425 | 425 | """Check node PVs.
426 | 426 |
427 | 427 | """
... | ... |
475 | 475 | return None
476 | 476 |
477 | 477 |
478 |  | def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
479 |  | nic_count, disk_sizes, spindle_use,
480 |  | disk_template,
481 |  | _compute_fn=_ComputeMinMaxSpec):
 | 478 | def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
 | 479 | nic_count, disk_sizes, spindle_use,
 | 480 | disk_template,
 | 481 | _compute_fn=_ComputeMinMaxSpec):
482 | 482 | """Verifies ipolicy against provided specs.
483 | 483 |
484 | 484 | @type ipolicy: dict
... | ... |
530 | 530 | return ret + min_errs
531 | 531 |
532 | 532 |
533 |  | def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
534 |  | _compute_fn=_ComputeIPolicySpecViolation):
 | 533 | def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
 | 534 | _compute_fn=ComputeIPolicySpecViolation):
535 | 535 | """Compute if instance meets the specs of ipolicy.
536 | 536 |
537 | 537 | @type ipolicy: dict
... | ... |
541 | 541 | @type cfg: L{config.ConfigWriter}
542 | 542 | @param cfg: Cluster configuration
543 | 543 | @param _compute_fn: The function to verify ipolicy (unittest only)
544 |  | @see: L{_ComputeIPolicySpecViolation}
 | 544 | @see: L{ComputeIPolicySpecViolation}
545 | 545 |
546 | 546 | """
547 | 547 | be_full = cfg.GetClusterInfo().FillBE(instance)
... | ... |
569 | 569 |
570 | 570 | """
571 | 571 | return frozenset([inst.name for inst in instances
572 |  | if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
 | 572 | if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
573 | 573 |
574 | 574 |
575 |  | def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
 | 575 | def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
576 | 576 | """Computes a set of any instances that would violate the new ipolicy.
577 | 577 |
578 | 578 | @param old_ipolicy: The current (still in-place) ipolicy
... | ... |
588 | 588 | _ComputeViolatingInstances(old_ipolicy, instances, cfg))
589 | 589 |
590 | 590 |
591 |  | def _GetUpdatedParams(old_params, update_dict,
 | 591 | def GetUpdatedParams(old_params, update_dict,
592 | 592 | use_default=True, use_none=False):
593 | 593 | """Return the new version of a parameter dictionary.
594 | 594 |
... | ... |
621 | 621 | return params_copy
622 | 622 |
623 | 623 |
624 |  | def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
 | 624 | def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
625 | 625 | """Return the new version of an instance policy.
626 | 626 |
627 | 627 | @param group_policy: whether this policy applies to a group and thus
... | ... |
660 | 660 | if group_policy:
661 | 661 | msg = "%s cannot appear in group instance specs" % key
662 | 662 | raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
663 |  | ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
664 |  | use_none=False, use_default=False)
 | 663 | ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
 | 664 | use_none=False, use_default=False)
665 | 665 | utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
666 | 666 | else:
667 | 667 | # FIXME: we assume all others are lists; this should be redone
... | ... |
675 | 675 | return ipolicy
676 | 676 |
677 | 677 |
678 |  | def _AnnotateDiskParams(instance, devs, cfg):
 | 678 | def AnnotateDiskParams(instance, devs, cfg):
679 | 679 | """Little helper wrapper to the rpc annotation method.
680 | 680 |
681 | 681 | @param instance: The instance object
... | ... |
690 | 690 | cfg.GetInstanceDiskParams(instance))
691 | 691 |
692 | 692 |
693 |  | def _SupportsOob(cfg, node):
 | 693 | def SupportsOob(cfg, node):
694 | 694 | """Tells if node supports OOB.
695 | 695 |
696 | 696 | @type cfg: L{config.ConfigWriter}
... | ... |
713 | 713 |
714 | 714 | """
715 | 715 | def fn(old, value):
716 |  | new = _GetUpdatedParams(old, value)
 | 716 | new = GetUpdatedParams(old, value)
717 | 717 | utils.ForceDictType(new, type_check)
718 | 718 | return new
719 | 719 |
... | ... |
738 | 738 | return [name for name in nodenames if name not in vm_nodes]
739 | 739 |
740 | 740 |
741 |  | def _GetDefaultIAllocator(cfg, ialloc):
 | 741 | def GetDefaultIAllocator(cfg, ialloc):
742 | 742 | """Decides on which iallocator to use.
743 | 743 |
744 | 744 | @type cfg: L{config.ConfigWriter}
... | ... |
761 | 761 | return ialloc
762 | 762 |
763 | 763 |
764 |  | def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
765 |  | cur_group_uuid):
 | 764 | def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
 | 765 | cur_group_uuid):
766 | 766 | """Checks if node groups for locked instances are still correct.
767 | 767 |
768 | 768 | @type cfg: L{config.ConfigWriter}
... | ... |
781 | 781 | assert owned_nodes.issuperset(inst.all_nodes), \
782 | 782 | "Instance %s's nodes changed while we kept the lock" % name
783 | 783 |
784 |  | inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
 | 784 | inst_groups = CheckInstanceNodeGroups(cfg, name, owned_groups)
785 | 785 |
786 | 786 | assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
787 | 787 | "Instance %s has no node in group %s" % (name, cur_group_uuid)
788 | 788 |
789 | 789 |
790 |  | def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
791 |  | primary_only=False):
 | 790 | def CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
 | 791 | primary_only=False):
792 | 792 | """Checks if the owned node groups are still correct for an instance.
793 | 793 |
794 | 794 | @type cfg: L{config.ConfigWriter}
... | ... |
816 | 816 | return inst_groups
817 | 817 |
818 | 818 |
819 |  | def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
 | 819 | def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
820 | 820 | """Unpacks the result of change-group and node-evacuate iallocator requests.
821 | 821 |
822 | 822 | Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
... | ... |
873 | 873 | return op
874 | 874 |
875 | 875 |
876 |  | def _MapInstanceDisksToNodes(instances):
 | 876 | def MapInstanceDisksToNodes(instances):
877 | 877 | """Creates a map from (node, volume) to instance name.
878 | 878 |
879 | 879 | @type instances: list of L{objects.Instance}
... | ... |
886 | 886 | for vol in vols)
887 | 887 |
888 | 888 |
889 |  | def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
 | 889 | def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
890 | 890 | """Make sure that none of the given paramters is global.
891 | 891 |
892 | 892 | If a global parameter is found, an L{errors.OpPrereqError} exception is
... | ... |
915 | 915 | raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
916 | 916 |
917 | 917 |
918 |  | def _IsExclusiveStorageEnabledNode(cfg, node):
 | 918 | def IsExclusiveStorageEnabledNode(cfg, node):
919 | 919 | """Whether exclusive_storage is in effect for the given node.
920 | 920 |
921 | 921 | @type cfg: L{config.ConfigWriter}
... | ... |
929 | 929 | return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
930 | 930 |
931 | 931 |
932 |  | def _CheckInstanceState(lu, instance, req_states, msg=None):
 | 932 | def CheckInstanceState(lu, instance, req_states, msg=None):
933 | 933 | """Ensure that an instance is in one of the required states.
934 | 934 |
935 | 935 | @param lu: the LU on behalf of which we make the check
... | ... |
960 | 960 | " is down")
961 | 961 |
962 | 962 |
963 |  | def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
 | 963 | def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
964 | 964 | """Check the sanity of iallocator and node arguments and use the
965 | 965 | cluster-wide iallocator if appropriate.
966 | 966 |
... | ... |
996 | 996 | " iallocator", errors.ECODE_INVAL)
997 | 997 |
998 | 998 |
999 |  | def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
 | 999 | def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
1000 | 1000 | faulty = []
1001 | 1001 |
1002 | 1002 | for dev in instance.disks:
... | ... |
1015 | 1015 | return faulty
1016 | 1016 |
1017 | 1017 |
1018 |  | def _CheckNodeOnline(lu, node, msg=None):
 | 1018 | def CheckNodeOnline(lu, node, msg=None):
1019 | 1019 | """Ensure that a given node is online.
1020 | 1020 |
1021 | 1021 | @param lu: the LU on behalf of which we make the check
b/lib/cmdlib/group.py
---|---|---
31 | 31 | from ganeti import query
32 | 32 | from ganeti import utils
33 | 33 | from ganeti.masterd import iallocator
34 |  | from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase, \
 | 34 | from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
35 | 35 | ResultWithJobs
36 |  | from ganeti.cmdlib.common import _MergeAndVerifyHvState, \
37 |  | _MergeAndVerifyDiskState, _GetWantedNodes, _GetUpdatedParams, \
38 |  | _CheckNodeGroupInstances, _GetUpdatedIPolicy, \
39 |  | _ComputeNewInstanceViolations, _GetDefaultIAllocator, _ShareAll, \
40 |  | _CheckInstancesNodeGroups, _LoadNodeEvacResult, _MapInstanceDisksToNodes
 | 36 | from ganeti.cmdlib.common import MergeAndVerifyHvState, \
 | 37 | MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
 | 38 | CheckNodeGroupInstances, GetUpdatedIPolicy, \
 | 39 | ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
 | 40 | CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceDisksToNodes
41 | 41 |
42 | 42 | import ganeti.masterd.instance
43 | 43 |
... | ... |
79 | 79 | utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
80 | 80 |
81 | 81 | if self.op.hv_state:
82 |  | self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
 | 82 | self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
83 | 83 | else:
84 | 84 | self.new_hv_state = None
85 | 85 |
86 | 86 | if self.op.disk_state:
87 |  | self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
 | 87 | self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
88 | 88 | else:
89 | 89 | self.new_disk_state = None
90 | 90 |
... | ... |
152 | 152 | def ExpandNames(self):
153 | 153 | # These raise errors.OpPrereqError on their own:
154 | 154 | self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
155 |  | self.op.nodes = _GetWantedNodes(self, self.op.nodes)
 | 155 | self.op.nodes = GetWantedNodes(self, self.op.nodes)
156 | 156 |
157 | 157 | # We want to lock all the affected nodes and groups. We have readily
158 | 158 | # available the list of nodes, and the *destination* group. To gather the
... | ... |
276 | 276 | list(previously_split_instances & all_split_instances))
277 | 277 |
278 | 278 |
279 |  | class _GroupQuery(_QueryBase):
 | 279 | class GroupQuery(QueryBase):
280 | 280 | FIELDS = query.GROUP_FIELDS
281 | 281 |
282 | 282 | def ExpandNames(self, lu):
... | ... |
363 | 363 | REQ_BGL = False
364 | 364 |
365 | 365 | def CheckArguments(self):
366 |  | self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
 | 366 | self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
367 | 367 | self.op.output_fields, False)
368 | 368 |
369 | 369 | def ExpandNames(self):
... | ... |
423 | 423 | """Updates and verifies disk parameters.
424 | 424 |
425 | 425 | """
426 |  | new_params = _GetUpdatedParams(old, new)
 | 426 | new_params = GetUpdatedParams(old, new)
427 | 427 | utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
428 | 428 | return new_params
429 | 429 |
... | ... |
434 | 434 | owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
435 | 435 |
436 | 436 | # Check if locked instances are still correct
437 |  | _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 | 437 | CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
438 | 438 |
439 | 439 | self.group = self.cfg.GetNodeGroup(self.group_uuid)
440 | 440 | cluster = self.cfg.GetClusterInfo()
... | ... |
444 | 444 | (self.op.group_name, self.group_uuid))
445 | 445 |
446 | 446 | if self.op.ndparams:
447 |  | new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
 | 447 | new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
448 | 448 | utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
449 | 449 | self.new_ndparams = new_ndparams
450 | 450 |
... | ... |
467 | 467 | errors.ECODE_INVAL)
468 | 468 |
469 | 469 | if self.op.hv_state:
470 |  | self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
471 |  | self.group.hv_state_static)
 | 470 | self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
 | 471 | self.group.hv_state_static)
472 | 472 |
473 | 473 | if self.op.disk_state:
474 | 474 | self.new_disk_state = \
475 |  | _MergeAndVerifyDiskState(self.op.disk_state,
476 |  | self.group.disk_state_static)
 | 475 | MergeAndVerifyDiskState(self.op.disk_state,
 | 476 | self.group.disk_state_static)
477 | 477 |
478 | 478 | if self.op.ipolicy:
479 |  | self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
480 |  | self.op.ipolicy,
481 |  | group_policy=True)
 | 479 | self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
 | 480 | self.op.ipolicy,
 | 481 | group_policy=True)
482 | 482 |
483 | 483 | new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
484 | 484 | inst_filter = lambda inst: inst.name in owned_instances
485 | 485 | instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
486 | 486 | gmi = ganeti.masterd.instance
487 | 487 | violations = \
488 |  | _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
489 |  | self.group),
490 |  | new_ipolicy, instances, self.cfg)
 | 488 | ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
 | 489 | self.group),
 | 490 | new_ipolicy, instances, self.cfg)
491 | 491 |
492 | 492 | if violations:
493 | 493 | self.LogWarning("After the ipolicy change the following instances"
... | ... |
697 | 697 | utils.CommaJoin(self.req_target_uuids)),
698 | 698 | errors.ECODE_INVAL)
699 | 699 |
700 |  | self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
 | 700 | self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
701 | 701 |
702 |  | self.share_locks = _ShareAll()
 | 702 | self.share_locks = ShareAll()
703 | 703 | self.needed_locks = {
704 | 704 | locking.LEVEL_INSTANCE: [],
705 | 705 | locking.LEVEL_NODEGROUP: [],
... | ... |
757 | 757 | assert self.group_uuid in owned_groups
758 | 758 |
759 | 759 | # Check if locked instances are still correct
760 |  | _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 | 760 | CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
761 | 761 |
762 | 762 | # Get instance information
763 | 763 | self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
764 | 764 |
765 | 765 | # Check if node groups for locked instances are still correct
766 |  | _CheckInstancesNodeGroups(self.cfg, self.instances,
767 |  | owned_groups, owned_nodes, self.group_uuid)
 | 766 | CheckInstancesNodeGroups(self.cfg, self.instances,
 | 767 | owned_groups, owned_nodes, self.group_uuid)
768 | 768 |
769 | 769 | if self.req_target_uuids:
770 | 770 | # User requested specific target groups
... | ... |
816 | 816 | (self.op.iallocator, ial.info),
817 | 817 | errors.ECODE_NORES)
818 | 818 |
819 |  | jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
 | 819 | jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
820 | 820 |
821 | 821 | self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
822 | 822 | len(jobs), self.op.group_name)
... | ... |
834 | 834 | # Raises errors.OpPrereqError on its own if group can't be found
835 | 835 | self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
836 | 836 |
837 |  | self.share_locks = _ShareAll()
 | 837 | self.share_locks = ShareAll()
838 | 838 | self.needed_locks = {
839 | 839 | locking.LEVEL_INSTANCE: [],
840 | 840 | locking.LEVEL_NODEGROUP: [],
... | ... |
887 | 887 | assert self.group_uuid in owned_groups
888 | 888 |
889 | 889 | # Check if locked instances are still correct
890 |  | _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 | 890 | CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
891 | 891 |
892 | 892 | # Get instance information
893 | 893 | self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
894 | 894 |
895 | 895 | # Check if node groups for locked instances are still correct
896 |  | _CheckInstancesNodeGroups(self.cfg, self.instances,
897 |  | owned_groups, owned_nodes, self.group_uuid)
 | 896 | CheckInstancesNodeGroups(self.cfg, self.instances,
 | 897 | owned_groups, owned_nodes, self.group_uuid)
898 | 898 |
899 | 899 | def Exec(self, feedback_fn):
900 | 900 | """Verify integrity of cluster disks.
... | ... |
909 | 909 | res_instances = set()
910 | 910 | res_missing = {}
911 | 911 |
912 |  | nv_dict = _MapInstanceDisksToNodes(
 | 912 | nv_dict = MapInstanceDisksToNodes(
913 | 913 | [inst for inst in self.instances.values()
914 | 914 | if inst.admin_state == constants.ADMINST_UP])
915 | 915 |
b/lib/cmdlib/instance.py
---|---|---
44 | 44 | from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45 | 45 |
46 | 46 | from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 |  | INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
48 |  | _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
49 |  | _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
50 |  | _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
51 |  | _AnnotateDiskParams, _GetUpdatedParams, _ExpandInstanceName, \
52 |  | _ComputeIPolicySpecViolation, _CheckInstanceState, _ExpandNodeName
53 |  | from ganeti.cmdlib.instance_storage import _CreateDisks, \
54 |  | _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
55 |  | _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
56 |  | _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
57 |  | _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
58 |  | _AssembleInstanceDisks
59 |  | from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
60 |  | _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
61 |  | _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
62 |  | _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
63 |  | _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
64 |  | _CheckInstanceBridgesExist, _CheckNicsBridgesExist, _CheckNodeHasOS
 | 47 | INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
 | 48 | ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
 | 49 | LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
 | 50 | IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
 | 51 | AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
 | 52 | ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
 | 53 | from ganeti.cmdlib.instance_storage import CreateDisks, \
 | 54 | CheckNodesFreeDiskPerVG, WipeDisks, WaitForSync, \
 | 55 | IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
 | 56 | CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
 | 57 | CreateBlockDev, StartInstanceDisks, ShutdownInstanceDisks, \
 | 58 | AssembleInstanceDisks
 | 59 | from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
 | 60 | GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
 | 61 | NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
 | 62 | ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
 | 63 | GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
 | 64 | CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65 | 65 |
66 | 66 | import ganeti.masterd.instance
67 | 67 |
68 | 68 |
69 |  | #: Type description for changes as returned by L{ApplyContainerMods}'s
 | 69 | #: Type description for changes as returned by L{_ApplyContainerMods}'s
70 | 70 | #: callbacks
71 | 71 | _TApplyContModsCbChanges = \
72 | 72 | ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
... | ... |
266 | 266 |
267 | 267 | def _ComputeIPolicyInstanceSpecViolation(
268 | 268 | ipolicy, instance_spec, disk_template,
269 |  | _compute_fn=_ComputeIPolicySpecViolation):
 | 269 | _compute_fn=ComputeIPolicySpecViolation):
270 | 270 | """Compute if instance specs meets the specs of ipolicy.
271 | 271 |
272 | 272 | @type ipolicy: dict
... | ... |
276 | 276 | @type disk_template: string
277 | 277 | @param disk_template: the disk template of the instance
278 | 278 | @param _compute_fn: The function to verify ipolicy (unittest only)
279 |  | @see: L{_ComputeIPolicySpecViolation}
 | 279 | @see: L{ComputeIPolicySpecViolation}
280 | 280 |
281 | 281 | """
282 | 282 | mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
... | ... |
411 | 411 | opcodes.RequireSharedFileStorage()
412 | 412 |
413 | 413 | ### Node/iallocator related checks
414 |  | _CheckIAllocatorOrNode(self, "iallocator", "pnode")
 | 414 | CheckIAllocatorOrNode(self, "iallocator", "pnode")
415 | 415 |
416 | 416 | if self.op.pnode is not None:
417 | 417 | if self.op.disk_template in constants.DTS_INT_MIRROR:
... | ... |
425 | 425 |
426 | 426 | _CheckOpportunisticLocking(self.op)
427 | 427 |
428 |  | self._cds = _GetClusterDomainSecret()
 | 428 | self._cds = GetClusterDomainSecret()
429 | 429 |
430 | 430 | if self.op.mode == constants.INSTANCE_IMPORT:
431 | 431 | # On import force_variant must be True, because if we forced it at
... | ... |
521 | 521 | self.opportunistic_locks[locking.LEVEL_NODE] = True
522 | 522 | self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
523 | 523 | else:
524 |  | self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
 | 524 | self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
525 | 525 | nodelist = [self.op.pnode]
526 | 526 | if self.op.snode is not None:
527 |  | self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
 | 527 | self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
528 | 528 | nodelist.append(self.op.snode)
529 | 529 | self.needed_locks[locking.LEVEL_NODE] = nodelist
530 | 530 |
... | ... |
545 | 545 | " requires a source node option",
546 | 546 | errors.ECODE_INVAL)
547 | 547 | else:
548 |  | self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
 | 548 | self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
549 | 549 | if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
550 | 550 | self.needed_locks[locking.LEVEL_NODE].append(src_node)
551 | 551 | if not os.path.isabs(src_path):
... | ... |
553 | 553 | utils.PathJoin(pathutils.EXPORT_DIR, src_path)
554 | 554 |
555 | 555 | self.needed_locks[locking.LEVEL_NODE_RES] = \
556 |  | _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 | 556 | CopyLockList(self.needed_locks[locking.LEVEL_NODE])
557 | 557 |
558 | 558 | def _RunAllocator(self):
559 | 559 | """Run the allocator based on input opcode.
... | ... |
610 | 610 | env["SRC_PATH"] = self.op.src_path
611 | 611 | env["SRC_IMAGES"] = self.src_images
612 | 612 |
613 |  | env.update(_BuildInstanceHookEnv(
 | 613 | env.update(BuildInstanceHookEnv(
614 | 614 | name=self.op.instance_name,
615 | 615 | primary_node=self.op.pnode,
616 | 616 | secondary_nodes=self.secondaries,
... | ... |
619 | 619 | minmem=self.be_full[constants.BE_MINMEM],
620 | 620 | maxmem=self.be_full[constants.BE_MAXMEM],
621 | 621 | vcpus=self.be_full[constants.BE_VCPUS],
622 |  | nics=_NICListToTuple(self, self.nics),
 | 622 | nics=NICListToTuple(self, self.nics),
623 | 623 | disk_template=self.op.disk_template,
624 | 624 | disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
625 | 625 | d[constants.IDISK_MODE]) for d in self.disks],
... | ... |
669 | 669 | raise errors.OpPrereqError("No export found for relative path %s" %
670 | 670 | src_path, errors.ECODE_INVAL)
671 | 671 |
672 |  | _CheckNodeOnline(self, src_node)
 | 672 | CheckNodeOnline(self, src_node)
673 | 673 | result = self.rpc.call_export_info(src_node, src_path)
674 | 674 | result.Raise("No export or invalid export found in dir %s" % src_path)
675 | 675 |
... | ... |
871 | 871 | hv_type.CheckParameterSyntax(filled_hvp)
872 | 872 | self.hv_full = filled_hvp
873 | 873 | # check that we don't specify global parameters on an instance
874 |  | _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
875 |  | "instance", "cluster")
 | 874 | CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
 | 875 | "instance", "cluster")
876 | 876 |
877 | 877 | # fill and remember the beparams dict
878 | 878 | self.be_full = _ComputeFullBeParams(self.op, cluster)
... | ... |
891 | 891 |
892 | 892 | # disk checks/pre-build
893 | 893 | default_vg = self.cfg.GetVGName()
894 |  | self.disks = _ComputeDisks(self.op, default_vg)
 | 894 | self.disks = ComputeDisks(self.op, default_vg)
895 | 895 |
896 | 896 | if self.op.mode == constants.INSTANCE_IMPORT:
897 | 897 | disk_images = []
... | ... |
941 | 941 |
942 | 942 | # Release all unneeded node locks
943 | 943 | keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
944 |  | _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
945 |  | _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
946 |  | _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 | 944 | ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
 | 945 | ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
 | 946 | ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
947 | 947 |
948 | 948 | assert (self.owned_locks(locking.LEVEL_NODE) ==
949 | 949 | self.owned_locks(locking.LEVEL_NODE_RES)), \
... | ... |
1010 | 1010 | if self.op.snode == pnode.name:
1011 | 1011 | raise errors.OpPrereqError("The secondary node cannot be the"
1012 | 1012 | " primary node", errors.ECODE_INVAL)
1013 |  | _CheckNodeOnline(self, self.op.snode)
1014 |  | _CheckNodeNotDrained(self, self.op.snode)
1015 |  | _CheckNodeVmCapable(self, self.op.snode)
 | 1013 | CheckNodeOnline(self, self.op.snode)
 | 1014 | CheckNodeNotDrained(self, self.op.snode)
 | 1015 | CheckNodeVmCapable(self, self.op.snode)
1016 | 1016 | self.secondaries.append(self.op.snode)
1017 | 1017 |
1018 | 1018 | snode = self.cfg.GetNodeInfo(self.op.snode)
... | ... |
1026 | 1026 | nodes = [pnode]
1027 | 1027 | if self.op.disk_template in constants.DTS_INT_MIRROR:
1028 | 1028 | nodes.append(snode)
1029 |  | has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
 | 1029 | has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1030 | 1030 | if compat.any(map(has_es, nodes)):
1031 | 1031 | raise errors.OpPrereqError("Disk template %s not supported with"
1032 | 1032 | " exclusive storage" % self.op.disk_template,
... | ... |
1039 | 1039 | # _CheckRADOSFreeSpace() is just a placeholder.
1040 | 1040 | # Any function that checks prerequisites can be placed here.
1041 | 1041 | # Check if there is enough space on the RADOS cluster.
1042 |  | _CheckRADOSFreeSpace()
 | 1042 | CheckRADOSFreeSpace()
1043 | 1043 | elif self.op.disk_template == constants.DT_EXT:
1044 | 1044 | # FIXME: Function that checks prereqs if needed
1045 | 1045 | pass
1046 | 1046 | else:
1047 | 1047 | # Check lv size requirements, if not adopting
1048 |  | req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1049 |  | _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
 | 1048 | req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
 | 1049 | CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1050 | 1050 |
1051 | 1051 | elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1052 | 1052 | all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
... | ... |
1139 | 1139 | (pnode.group, group_info.name, utils.CommaJoin(res)))
1140 | 1140 | raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1141 | 1141 |
1142 |  | _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 | 1142 | CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1143 | 1143 |
1144 |  | _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
 | 1144 | CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1145 | 1145 | # check OS parameters (remotely)
1146 |  | _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
 | 1146 | CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1147 | 1147 |
1148 |  | _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 | 1148 | CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1149 | 1149 |
1150 | 1150 | #TODO: _CheckExtParams (remotely)
1151 | 1151 | # Check parameters for extstorage
... | ... |
1153 | 1153 | # memory check on primary node
1154 | 1154 | #TODO(dynmem): use MINMEM for checking
1155 | 1155 | if self.op.start:
1156 |  | _CheckNodeFreeMemory(self, self.pnode.name,
1157 |  | "creating instance %s" % self.op.instance_name,
1158 |  | self.be_full[constants.BE_MAXMEM],
1159 |  | self.op.hypervisor)
 | 1156 | CheckNodeFreeMemory(self, self.pnode.name,
 | 1157 | "creating instance %s" % self.op.instance_name,
 | 1158 | self.be_full[constants.BE_MAXMEM],
 | 1159 | self.op.hypervisor)
1160 | 1160 |
1161 | 1161 | self.dry_run_result = list(nodenames)
1162 | 1162 |
... | ... |
1183 | 1183 | # has no disks yet (we are generating them right here).
1184 | 1184 | node = self.cfg.GetNodeInfo(pnode_name)
1185 | 1185 | nodegroup = self.cfg.GetNodeGroup(node.group)
1186 |  | disks = _GenerateDiskTemplate(self,
1187 |  | self.op.disk_template,
1188 |  | instance, pnode_name,
1189 |  | self.secondaries,
1190 |  | self.disks,
1191 |  | self.instance_file_storage_dir,
1192 |  | self.op.file_driver,
1193 |  | 0,
1194 |  | feedback_fn,
1195 |  | self.cfg.GetGroupDiskParams(nodegroup))
 | 1186 | disks = GenerateDiskTemplate(self,
 | 1187 | self.op.disk_template,
 | 1188 | instance, pnode_name,
 | 1189 | self.secondaries,
 | 1190 | self.disks,
 | 1191 | self.instance_file_storage_dir,
 | 1192 | self.op.file_driver,
 | 1193 | 0,
 | 1194 | feedback_fn,
 | 1195 | self.cfg.GetGroupDiskParams(nodegroup))
1196 | 1196 |
1197 | 1197 | iobj = objects.Instance(name=instance, os=self.op.os_type,
1198 | 1198 | primary_node=pnode_name,
... | ... |
1226 | 1226 | else:
1227 | 1227 | feedback_fn("* creating instance disks...")
1228 | 1228 | try:
1229 |  | _CreateDisks(self, iobj)
 | 1229 | CreateDisks(self, iobj)
1230 | 1230 | except errors.OpExecError:
1231 | 1231 | self.LogWarning("Device creation failed")
1232 | 1232 | self.cfg.ReleaseDRBDMinors(instance)
... | ... |
1242 | 1242 |
1243 | 1243 | if self.op.mode == constants.INSTANCE_IMPORT:
1244 | 1244 | # Release unused nodes
1245 |  | _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
 | 1245 | ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1246 | 1246 | else:
1247 | 1247 | # Release all nodes
1248 |  | _ReleaseLocks(self, locking.LEVEL_NODE)
 | 1248 | ReleaseLocks(self, locking.LEVEL_NODE)
1249 | 1249 |
1250 | 1250 | disk_abort = False
1251 | 1251 | if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1252 | 1252 | feedback_fn("* wiping instance disks...")
1253 | 1253 | try:
1254 |  | _WipeDisks(self, iobj)
 | 1254 | WipeDisks(self, iobj)
1255 | 1255 | except errors.OpExecError, err:
1256 | 1256 | logging.exception("Wiping disks failed")
1257 | 1257 | self.LogWarning("Wiping instance disks failed (%s)", err)
... | ... |
1261 | 1261 | # Something is already wrong with the disks, don't do anything else
1262 | 1262 | pass
1263 | 1263 | elif self.op.wait_for_sync:
1264 |  | disk_abort = not _WaitForSync(self, iobj)
 | 1264 | disk_abort = not WaitForSync(self, iobj)
1265 | 1265 | elif iobj.disk_template in constants.DTS_INT_MIRROR:
1266 | 1266 | # make sure the disks are not degraded (still sync-ing is ok)
1267 | 1267 | feedback_fn("* checking mirrors status")
1268 |  | disk_abort = not _WaitForSync(self, iobj, oneshot=True)
 | 1268 | disk_abort = not WaitForSync(self, iobj, oneshot=True)
1269 | 1269 | else:
1270 | 1270 | disk_abort = False
1271 | 1271 |
1272 | 1272 | if disk_abort:
1273 |  | _RemoveDisks(self, iobj)
 | 1273 | RemoveDisks(self, iobj)
1274 | 1274 | self.cfg.RemoveInstance(iobj.name)
1275 | 1275 | # Make sure the instance lock gets removed
1276 | 1276 | self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
... | ... |
1278 | 1278 | " this instance")
1279 | 1279 |
1280 | 1280 | # Release all node resource locks
1281 |  | _ReleaseLocks(self, locking.LEVEL_NODE_RES)
 | 1281 | ReleaseLocks(self, locking.LEVEL_NODE_RES)
1282 | 1282 |
1283 | 1283 | if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1284 | 1284 | # we need to set the disks ID to the primary node, since the
... | ... |
1421 | 1421 | This runs on master, primary and secondary nodes of the instance.
1422 | 1422 |
1423 | 1423 | """
1424 |  | env = _BuildInstanceHookEnvByObject(self, self.instance)
 | 1424 | env = BuildInstanceHookEnvByObject(self, self.instance)
1425 | 1425 | env["INSTANCE_NEW_NAME"] = self.op.new_name
1426 | 1426 | return env
1427 | 1427 |
... | ... |
1438 | 1438 | This checks that the instance is in the cluster and is not running.
1439 | 1439 |
1440 | 1440 | """
1441 |  | self.op.instance_name = _ExpandInstanceName(self.cfg,
1442 |  | self.op.instance_name)
 | 1441 | self.op.instance_name = ExpandInstanceName(self.cfg,
 | 1442 | self.op.instance_name)
1443 | 1443 | instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1444 | 1444 | assert instance is not None
1445 |  | _CheckNodeOnline(self, instance.primary_node)
1446 |  | _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1447 |  | msg="cannot rename")
 | 1445 | CheckNodeOnline(self, instance.primary_node)
 | 1446 | CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
 | 1447 | msg="cannot rename")
1448 | 1448 | self.instance = instance
1449 | 1449 |
1450 | 1450 | new_name = self.op.new_name
... | ... |
1496 | 1496 | (inst.primary_node, old_file_storage_dir,
1497 | 1497 | new_file_storage_dir))
1498 | 1498 |
1499 |  | _StartInstanceDisks(self, inst, None)
 | 1499 | StartInstanceDisks(self, inst, None)
1500 | 1500 | # update info on disks
1501 |  | info = _GetInstanceInfoText(inst)
 | 1501 | info = GetInstanceInfoText(inst)
1502 | 1502 | for (idx, disk) in enumerate(inst.disks):
1503 | 1503 | for node in inst.all_nodes:
1504 | 1504 | self.cfg.SetDiskID(disk, node)
... | ... |
1516 | 1516 | (inst.name, inst.primary_node, msg))
1517 | 1517 | self.LogWarning(msg)
1518 | 1518 | finally:
1519 |  | _ShutdownInstanceDisks(self, inst)
 | 1519 | ShutdownInstanceDisks(self, inst)
1520 | 1520 |
1521 | 1521 | return inst.name
1522 | 1522 |
... | ... |
1541 | 1541 | elif level == locking.LEVEL_NODE_RES:
1542 | 1542 | # Copy node locks
1543 | 1543 | self.needed_locks[locking.LEVEL_NODE_RES] = \
1544 |  | _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 | 1544 | CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1545 | 1545 |
1546 | 1546 | def BuildHooksEnv(self):
1547 | 1547 | """Build hooks env.
... | ... |
1549 | 1549 | This runs on master, primary and secondary nodes of the instance.
1550 | 1550 |
1551 | 1551 | """
1552 |  | env = _BuildInstanceHookEnvByObject(self, self.instance)
 | 1552 | env = BuildInstanceHookEnvByObject(self, self.instance)
1553 | 1553 | env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1554 | 1554 | return env
1555 | 1555 |
... | ... |
1597 | 1597 | self.owned_locks(locking.LEVEL_NODE)), \
1598 | 1598 | "Not owning correct locks"
1599 | 1599 |
1600 |  | _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
 | 1600 | RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1601 | 1601 |
1602 | 1602 |
1603 | 1603 | class LUInstanceMove(LogicalUnit):
... | ... |
1610 | 1610 |
1611 | 1611 | def ExpandNames(self):
1612 | 1612 | self._ExpandAndLockInstance()
1613 |  | target_node = _ExpandNodeName(self.cfg, self.op.target_node)
 | 1613 | target_node = ExpandNodeName(self.cfg, self.op.target_node)
1614 | 1614 | self.op.target_node = target_node
1615 | 1615 | self.needed_locks[locking.LEVEL_NODE] = [target_node]
1616 | 1616 | self.needed_locks[locking.LEVEL_NODE_RES] = []
... | ... |
1622 | 1622 | elif level == locking.LEVEL_NODE_RES:
1623 | 1623 | # Copy node locks
1624 | 1624 | self.needed_locks[locking.LEVEL_NODE_RES] = \
1625 |  | _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 | 1625 | CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1626 | 1626 |
1627 | 1627 | def BuildHooksEnv(self):
1628 | 1628 | """Build hooks env.
... | ... |
1634 | 1634 | "TARGET_NODE": self.op.target_node,
1635 | 1635 | "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1636 | 1636 | }
1637 |  | env.update(_BuildInstanceHookEnvByObject(self, self.instance))
 | 1637 | env.update(BuildInstanceHookEnvByObject(self, self.instance))
1638 | 1638 | return env
1639 | 1639 |
1640 | 1640 | def BuildHooksNodes(self):
... | ... |
1680 | 1680 | raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1681 | 1681 | " cannot copy" % idx, errors.ECODE_STATE)
1682 | 1682 |
1683 |  | _CheckNodeOnline(self, target_node)
1684 |  | _CheckNodeNotDrained(self, target_node)
1685 |  | _CheckNodeVmCapable(self, target_node)
 | 1683 | CheckNodeOnline(self, target_node)
 | 1684 | CheckNodeNotDrained(self, target_node)
 | 1685 | CheckNodeVmCapable(self, target_node)
1686 | 1686 | cluster = self.cfg.GetClusterInfo()
1687 | 1687 | group_info = self.cfg.GetNodeGroup(node.group)
1688 | 1688 | ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1689 |  | _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1690 |  | ignore=self.op.ignore_ipolicy)
 | 1689 | CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
 | 1690 | ignore=self.op.ignore_ipolicy)
1691 | 1691 |
1692 | 1692 | if instance.admin_state == constants.ADMINST_UP:
1693 | 1693 | # check memory requirements on the secondary node
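
A side effect visible in the common.py and instance.py hunks is that default argument values naming a renamed helper (for example _compute_fn=_ComputeIPolicySpecViolation becoming _compute_fn=ComputeIPolicySpecViolation) must be updated together with the definition, since Python binds the default to the function object when the enclosing def is evaluated. A generic sketch of that mechanic, with illustrative names that are not Ganeti code:

    def ComputeSpecViolation(spec):
        # stand-in for a renamed helper; returns the keys that violate the spec
        return [key for key, ok in spec.items() if not ok]

    def ComputeInstanceViolation(spec, _compute_fn=ComputeSpecViolation):
        # the default must reference the helper by its new, public name
        return _compute_fn(spec)

    print(ComputeInstanceViolation({"mem": True, "cpu": False}))  # ['cpu']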