Revision 5ae4945a lib/cmdlib.py
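This revision is a style and lint cleanup of lib/cmdlib.py: over-long statements are re-wrapped by breaking immediately after the opening parenthesis with a fixed hanging indent (instead of aligning continuation lines under the parenthesis), and comparisons against the singletons None, True and False switch from == to the identity operator is. No functional change is intended.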
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1208,8 +1208,8 @@
                                      disk_sizes, spindle_use)


-def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
-                                         _compute_fn=_ComputeIPolicySpecViolation):
+def _ComputeIPolicyInstanceSpecViolation(
+    ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance specs meets the specs of ipolicy.

   @type ipolicy: dict
@@ -1920,10 +1920,11 @@
     # Always depend on global verification
     depends_fn = lambda: [(-len(jobs), [])]

-    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
-                                              ignore_errors=self.op.ignore_errors,
-                                              depends=depends_fn())]
-                for group in groups)
+    jobs.extend(
+      [opcodes.OpClusterVerifyGroup(group_name=group,
+                                    ignore_errors=self.op.ignore_errors,
+                                    depends=depends_fn())]
+      for group in groups)

     # Fix up all parameters
     for op in itertools.chain(*jobs): # pylint: disable=W0142
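A note on the jobs.extend call in this hunk: its argument is a generator expression yielding one-element lists, so every node group becomes its own job containing a single OpClusterVerifyGroup opcode, and depends_fn is re-evaluated per group while jobs grows. Since negative dependency indices count backwards from the point of submission, -len(jobs) keeps pointing at the first (global verification) job, as the hunk's comment says. A minimal sketch with hypothetical stand-ins (plain tuples instead of Ganeti opcodes):

```python
# Sketch of the pattern above, using plain tuples as stand-ins for
# Ganeti opcodes. extend() consumes the generator one item at a time,
# appending as it goes, so len(jobs) grows between yields.
groups = ["default", "storage"]
jobs = [["global-verify"]]  # stand-in for the already-queued verification job

# Relative dependency: -len(jobs) counts backwards from the point of
# submission, which always lands on the first job in this list.
depends_fn = lambda: [(-len(jobs), [])]

jobs.extend(
  [("verify-group", group, depends_fn())]
  for group in groups)

for job in jobs:
  print(job)
# ['global-verify']
# [('verify-group', 'default', [(-1, [])])]
# [('verify-group', 'storage', [(-2, [])])]
```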
@@ -2645,7 +2646,7 @@

     if drbd_helper:
       helper_result = nresult.get(constants.NV_DRBDHELPER, None)
-      test = (helper_result == None)
+      test = (helper_result is None)
       _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
                "no drbd usermode helper returned")
       if helper_result:
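The == None fix in this hunk is the classic identity-comparison pitfall: == dispatches to the left operand's __eq__, which any class may override, while is tests object identity and is the reliable (and PEP 8 mandated) way to compare against the None singleton. A short illustration:

```python
# Why "is None" beats "== None": __eq__ can be overridden,
# identity comparison cannot.
class AlwaysEqual(object):
  def __eq__(self, other):
    return True  # claims equality with everything, including None

helper_result = AlwaysEqual()
print(helper_result == None)  # True  (misleading)
print(helper_result is None)  # False (correct)
```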
@@ -3572,9 +3573,9 @@
     res_instances = set()
     res_missing = {}

-    nv_dict = _MapInstanceDisksToNodes([inst
-            for inst in self.instances.values()
-            if inst.admin_state == constants.ADMINST_UP])
+    nv_dict = _MapInstanceDisksToNodes(
+      [inst for inst in self.instances.values()
+       if inst.admin_state == constants.ADMINST_UP])

     if nv_dict:
       nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
@@ -4330,11 +4331,13 @@
       files_mc.add(constants.CLUSTER_CONF_FILE)

   # Files which should only be on VM-capable nodes
-  files_vm = set(filename
+  files_vm = set(
+    filename
     for hv_name in cluster.enabled_hypervisors
     for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])

-  files_opt |= set(filename
+  files_opt |= set(
+    filename
     for hv_name in cluster.enabled_hypervisors
     for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])

@@ -4757,10 +4760,10 @@
                    type(result.payload))

       if self.op.command in [
-          constants.OOB_POWER_ON,
-          constants.OOB_POWER_OFF,
-          constants.OOB_POWER_CYCLE,
-          ]:
+        constants.OOB_POWER_ON,
+        constants.OOB_POWER_OFF,
+        constants.OOB_POWER_CYCLE,
+        ]:
         if result.payload is not None:
           errs.append("%s is expected to not return payload but got '%s'" %
                       (self.op.command, result.payload))
@@ -5636,7 +5639,7 @@
     if not newbie_singlehomed:
       # check reachability from my secondary ip to newbie's secondary ip
       if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
-                           source=myself.secondary_ip):
+                              source=myself.secondary_ip):
         raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                    " based ping to node daemon port",
                                    errors.ECODE_ENVIRON)
@@ -5814,10 +5817,10 @@
                                  errors.ECODE_INVAL)

     # Boolean value that tells us whether we might be demoting from MC
-    self.might_demote = (self.op.master_candidate == False or
-                         self.op.offline == True or
-                         self.op.drained == True or
-                         self.op.master_capable == False)
+    self.might_demote = (self.op.master_candidate is False or
+                         self.op.offline is True or
+                         self.op.drained is True or
+                         self.op.master_capable is False)

     if self.op.secondary_ip:
       if not netutils.IP4Address.IsValid(self.op.secondary_ip):
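The is True/is False rewrites here are worth a second look because the node flags on this opcode are tri-state: None means "not specified, leave the flag alone". That is why the code must test flag is False rather than not flag, which would also fire for an omitted flag. A minimal sketch (the Op class below is a stand-in for the real opcode, whose fields default to None):

```python
# Tri-state flags: None = "leave unchanged", True/False = explicit
# request. Op is a stand-in for the real Ganeti opcode class.
class Op(object):
  def __init__(self, master_candidate=None, offline=None,
               drained=None, master_capable=None):
    self.master_candidate = master_candidate
    self.offline = offline
    self.drained = drained
    self.master_capable = master_capable

op = Op(offline=True)  # only "offline" was explicitly requested
might_demote = (op.master_candidate is False or
                op.offline is True or
                op.drained is True or
                op.master_capable is False)
print(might_demote)  # True: taking a node offline may demote it from MC

op2 = Op()  # nothing requested at all
print(op2.master_capable is False)  # False: flag was left alone
print(not op2.master_capable)       # True: would wrongly fire
```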
@@ -5918,7 +5921,7 @@
                                    " it a master candidate" % node.name,
                                    errors.ECODE_STATE)

-    if self.op.vm_capable == False:
+    if self.op.vm_capable is False:
       (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
       if ipri or isec:
         raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
@@ -5944,7 +5947,7 @@

     # Check for ineffective changes
     for attr in self._FLAGS:
-      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
+      if (getattr(self.op, attr) is False and getattr(node, attr) is False):
         self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
         setattr(self.op, attr, None)

@@ -5954,7 +5957,7 @@
     # TODO: We might query the real power state if it supports OOB
     if _SupportsOob(self.cfg, node):
       if self.op.offline is False and not (node.powered or
-                                           self.op.powered == True):
+                                           self.op.powered is True):
         raise errors.OpPrereqError(("Node %s needs to be turned on before its"
                                     " offline status can be reset") %
                                    self.op.node_name, errors.ECODE_STATE)
@@ -5965,14 +5968,14 @@
                                  errors.ECODE_STATE)

     # If we're being deofflined/drained, we'll MC ourself if needed
-    if (self.op.drained == False or self.op.offline == False or
+    if (self.op.drained is False or self.op.offline is False or
         (self.op.master_capable and not node.master_capable)):
       if _DecideSelfPromotion(self):
         self.op.master_candidate = True
         self.LogInfo("Auto-promoting node to master candidate")

     # If we're no longer master capable, we'll demote ourselves from MC
-    if self.op.master_capable == False and node.master_candidate:
+    if self.op.master_capable is False and node.master_candidate:
       self.LogInfo("Demoting from master candidate")
       self.op.master_candidate = False

@@ -8279,8 +8282,8 @@
                                  ial.required_nodes), errors.ECODE_FAULT)
     self.target_node = ial.result[0]
     self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
-                 self.instance_name, self.lu.op.iallocator,
-                 utils.CommaJoin(ial.result))
+                    self.instance_name, self.lu.op.iallocator,
+                    utils.CommaJoin(ial.result))

   def _WaitUntilSync(self):
     """Poll with custom rpc for disk sync.
@@ -8450,8 +8453,8 @@
       # Don't raise an exception here, as we stil have to try to revert the
       # disk status, even if this step failed.

-      abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
-                                                                   instance, False, self.live)
+      abort_result = self.rpc.call_instance_finalize_migration_src(
+        source_node, instance, False, self.live)
       abort_msg = abort_result.fail_msg
       if abort_msg:
         logging.error("Aborting migration failed on source node %s: %s",
@@ -8885,10 +8888,11 @@
   }


-def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
-    secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
-    feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
-    _req_shr_file_storage=opcodes.RequireSharedFileStorage):
+def _GenerateDiskTemplate(
+    lu, template_name, instance_name, primary_node, secondary_nodes,
+    disk_info, file_storage_dir, file_driver, base_index,
+    feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
+    _req_shr_file_storage=opcodes.RequireSharedFileStorage):
   """Generate the entire disk layout for a given template type.

   """
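The _GenerateDiskTemplate hunk shows the wrapping convention this revision adopts throughout: rather than aligning continuation lines under the opening parenthesis (an alignment column that shifts whenever the function is renamed and easily overruns 79 characters), the argument list breaks immediately after the parenthesis and uses a fixed hanging indent. Schematically, with hypothetical function names:

```python
# Old style: continuation aligned under the opening parenthesis; the
# alignment column depends on the length of the function name.
def generate_disk_template_old(lu, template_name, instance_name,
                               primary_node, secondary_nodes):
  pass

# New style: break straight after "(" with a fixed hanging indent;
# renaming the function no longer disturbs the argument lines.
def generate_disk_template_new(
    lu, template_name, instance_name, primary_node, secondary_nodes):
  pass
```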
@@ -9825,8 +9829,8 @@
     enabled_hvs = cluster.enabled_hypervisors
     if self.op.hypervisor not in enabled_hvs:
       raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
-                                 " cluster (%s)" % (self.op.hypervisor,
-                                  ",".join(enabled_hvs)),
+                                 " cluster (%s)" %
+                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                  errors.ECODE_STATE)

     # Check tag validity
@@ -10547,9 +10551,10 @@
       assert not self.needed_locks[locking.LEVEL_NODE]

       # Lock member nodes of all locked groups
-      self.needed_locks[locking.LEVEL_NODE] = [node_name
-        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-        for node_name in self.cfg.GetNodeGroup(group_uuid).members]
+      self.needed_locks[locking.LEVEL_NODE] = \
+        [node_name
+         for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
+         for node_name in self.cfg.GetNodeGroup(group_uuid).members]
     else:
       self._LockInstancesNodes()
   elif level == locking.LEVEL_NODE_RES:
@@ -12337,12 +12342,10 @@
     if self.op.hvparams:
       _CheckGlobalHvParams(self.op.hvparams)

-    self.op.disks = \
-      self._UpgradeDiskNicMods("disk", self.op.disks,
-        opcodes.OpInstanceSetParams.TestDiskModifications)
-    self.op.nics = \
-      self._UpgradeDiskNicMods("NIC", self.op.nics,
-        opcodes.OpInstanceSetParams.TestNicModifications)
+    self.op.disks = self._UpgradeDiskNicMods(
+      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
+    self.op.nics = self._UpgradeDiskNicMods(
+      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

     # Check disk modifications
     self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
@@ -12642,7 +12645,7 @@
                          " free memory information" % pnode)
       elif instance_info.fail_msg:
         self.warn.append("Can't get instance runtime information: %s" %
-                          instance_info.fail_msg)
+                         instance_info.fail_msg)
       else:
         if instance_info.payload:
           current_mem = int(instance_info.payload["memory"])
@@ -12694,7 +12697,8 @@
             self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
       raise errors.OpPrereqError("Instance %s must have memory between %d"
                                  " and %d MB of memory unless --force is"
-                                 " given" % (instance.name,
+                                 " given" %
+                                 (instance.name,
                                   self.be_proposed[constants.BE_MINMEM],
                                   self.be_proposed[constants.BE_MAXMEM]),
                                  errors.ECODE_INVAL)
@@ -15202,12 +15206,12 @@
              ht.TItems([ht.TNonEmptyString,
                         ht.TNonEmptyString,
                         ht.TListOf(ht.TNonEmptyString),
-                        ])))
+                       ])))
 _NEVAC_FAILED = \
   ht.TListOf(ht.TAnd(ht.TIsLength(2),
                      ht.TItems([ht.TNonEmptyString,
                                 ht.TMaybeString,
-                                ])))
+                               ])))
 _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                         ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
