"""Module implementing the master-side code."""
-# pylint: disable-msg=W0201,C0302
+# pylint: disable=W0201,C0302
# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions
-# C0302: since we have waaaay to many lines in this module
+# C0302: since we have waaaay too many lines in this module
import os
import os.path
from ganeti import opcodes
from ganeti import ht
-import ganeti.masterd.instance # pylint: disable-msg=W0611
+import ganeti.masterd.instance # pylint: disable=W0611
class ResultWithJobs:
# Used to force good behavior when calling helper functions
self.recalculate_locks = {}
# logging
- self.Log = processor.Log # pylint: disable-msg=C0103
- self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
- self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
- self.LogStep = processor.LogStep # pylint: disable-msg=C0103
+ self.Log = processor.Log # pylint: disable=C0103
+ self.LogWarning = processor.LogWarning # pylint: disable=C0103
+ self.LogInfo = processor.LogInfo # pylint: disable=C0103
+ self.LogStep = processor.LogStep # pylint: disable=C0103
# support for dry-run
self.dry_run_result = None
# support for generic debug attribute
"""
# API must be kept, thus we ignore the unused-argument and
# "method could be a function" warnings
- # pylint: disable-msg=W0613,R0201
+ # pylint: disable=W0613,R0201
return lu_result
def _ExpandAndLockInstance(self):
del self.recalculate_locks[locking.LEVEL_NODE]
-class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
+class NoHooksLU(LogicalUnit): # pylint: disable=W0223
"""Simple LU which runs no hooks.
This LU is intended as a parent for other LogicalUnits which will
try:
hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
except:
- # pylint: disable-msg=W0702
+ # pylint: disable=W0702
lu.LogWarning("Errors occurred running hooks on %s" % node_name)
}
if override:
args.update(override)
- return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
+ return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
def _AdjustCandidatePool(lu, exceptions):
# Run post hooks on master node before it's removed
_RunPostHook(self, master)
- result = self.rpc.call_node_stop_master(master, False)
+ result = self.rpc.call_node_deactivate_master_ip(master)
result.Raise("Could not disable the master role")
return master
try:
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
utils.ReadFile(filename))
- except Exception, err: # pylint: disable-msg=W0703
+ except Exception, err: # pylint: disable=W0703
return (LUClusterVerifyConfig.ETYPE_ERROR,
"Failed to load X509 certificate %s: %s" % (filename, err))
if args:
msg = msg % args
# then format the whole message
- if self.op.error_codes: # This is a mix-in. pylint: disable-msg=E1101
+ if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
else:
if item:
item = ""
msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
# and finally report it via the feedback_fn
- self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable-msg=E1101
+ self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101
def _ErrorIf(self, cond, *args, **kwargs):
"""Log an error message if the passed condition is True.
"""
cond = (bool(cond)
- or self.op.debug_simulate_errors) # pylint: disable-msg=E1101
+ or self.op.debug_simulate_errors) # pylint: disable=E1101
if cond:
self._Error(*args, **kwargs)
# do not mark the operation as failed for WARN cases only
for group in groups)
# Fix up all parameters
- for op in itertools.chain(*jobs): # pylint: disable-msg=W0142
+ for op in itertools.chain(*jobs): # pylint: disable=W0142
op.debug_simulate_errors = self.op.debug_simulate_errors
op.verbose = self.op.verbose
op.error_codes = self.op.error_codes
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
# main result, nresult should be a non-empty dict
test = not nresult or not isinstance(nresult, dict)
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
ntime = nresult.get(constants.NV_TIME, None)
try:
return
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
# checks vg existence and size > 20G
vglist = nresult.get(constants.NV_VGLIST, None)
return
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
missing = nresult.get(constants.NV_BRIDGES, None)
test = not isinstance(missing, list)
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
test = constants.NV_NODELIST not in nresult
_ErrorIf(test, self.ENODESSH, node,
available on the instance's node.
"""
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
node_current = instanceconfig.primary_node
node_vol_should = {}
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
if drbd_helper:
helper_result = nresult.get(constants.NV_DRBDHELPER, None)
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
remote_os = nresult.get(constants.NV_OSLIST, None)
test = (not isinstance(remote_os, list) or
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
nimg.lvm_fail = True
lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
"""
node = ninfo.name
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
# try to read free memory (from the hypervisor)
hv_info = nresult.get(constants.NV_HVINFO, None)
list of tuples (success, payload)
"""
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
node_disks = {}
node_disks_devonly = {}
"""Verify integrity of the node group, performing various test on nodes.
"""
- # This method has too many local variables. pylint: disable-msg=R0914
+ # This method has too many local variables. pylint: disable=R0914
feedback_fn("* Verifying group '%s'" % self.group_info.name)
if not self.my_node_names:
return True
self.bad = False
- _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ _ErrorIf = self._ErrorIf # pylint: disable=C0103
verbose = self.op.verbose
self._feedback_fn = feedback_fn
self._ErrorIf(test, self.ENODEHOOKS, node_name,
"Communication failure in hooks execution: %s", msg)
if res.offline or msg:
- # No need to investigate payload if node is offline or gave an error.
- # override manually lu_result here as _ErrorIf only
- # overrides self.bad
- lu_result = 1
+ # No need to investigate payload if node is offline or gave
+ # an error.
continue
for script, hkr, output in res.payload:
test = hkr == constants.HKR_FAIL
if test:
output = self._HOOKS_INDENT_RE.sub(" ", output)
feedback_fn("%s" % output)
- lu_result = 0
+ lu_result = False
return lu_result
# shutdown the master IP
master = self.cfg.GetMasterNode()
- result = self.rpc.call_node_stop_master(master, False)
+ result = self.rpc.call_node_deactivate_master_ip(master)
result.Raise("Could not disable the master role")
try:
pass
_UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
finally:
- result = self.rpc.call_node_start_master(master, False, False)
+ result = self.rpc.call_node_activate_master_ip(master)
msg = result.fail_msg
if msg:
self.LogWarning("Could not re-enable the master role on"
master = self.cfg.GetMasterNode()
feedback_fn("Shutting down master ip on the current netdev (%s)" %
self.cluster.master_netdev)
- result = self.rpc.call_node_stop_master(master, False)
+ result = self.rpc.call_node_deactivate_master_ip(master)
result.Raise("Could not disable the master ip")
feedback_fn("Changing master_netdev from %s to %s" %
(self.cluster.master_netdev, self.op.master_netdev))
if self.op.master_netdev:
feedback_fn("Starting the master ip on the new master netdev (%s)" %
self.op.master_netdev)
- result = self.rpc.call_node_start_master(master, False, False)
+ result = self.rpc.call_node_activate_master_ip(master)
if result.fail_msg:
self.LogWarning("Could not re-enable the master ip on"
" the master, please restart manually: %s",
if not redist:
files_all.update(constants.ALL_CERT_FILES)
files_all.update(ssconf.SimpleStore().GetFileList())
+ else:
+ # we need to ship at least the RAPI certificate
+ files_all.add(constants.RAPI_CERT_FILE)
if cluster.modify_etc_hosts:
files_all.add(constants.ETC_HOSTS)
_RedistributeAncillaryFiles(self)
+class LUClusterActivateMasterIp(NoHooksLU):
+ """Activate the master IP on the master node.
+
+ """
+ def Exec(self, feedback_fn):
+ """Activate the master IP.
+
+ """
+ master = self.cfg.GetMasterNode()
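+ # the result of the RPC call is not checked; any failure is ignored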
+ self.rpc.call_node_activate_master_ip(master)
+
+
+class LUClusterDeactivateMasterIp(NoHooksLU):
+ """Deactivate the master IP on the master node.
+
+ """
+ def Exec(self, feedback_fn):
+ """Deactivate the master IP.
+
+ """
+ master = self.cfg.GetMasterNode()
+ self.rpc.call_node_deactivate_master_ip(master)
+
+
def _WaitForSync(lu, instance, disks=None, oneshot=False):
"""Sleep and poll for an instance's disk to sync.
"""Logical unit for querying nodes.
"""
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
elif level == locking.LEVEL_NODE:
- lu._LockInstancesNodes() # pylint: disable-msg=W0212
+ lu._LockInstancesNodes() # pylint: disable=W0212
@staticmethod
def _CheckGroupLocks(lu):
"""Query for resources/items of a certain kind.
"""
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
"""Query for resources/items of a certain kind.
"""
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
# later in the procedure; this also means that if the re-add
# fails, we are left with a non-offlined, broken node
if self.op.readd:
- new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
+ new_node.drained = new_node.offline = False # pylint: disable=W0201
self.LogInfo("Readding a node, the offline/drained flags were reset")
# if we demote the node, we do cleanup later in the procedure
new_node.master_candidate = self.master_candidate
errors.ECODE_NORES)
+def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
+ """Checks if nodes have enough physical CPUs
+
+ This function checks if all given nodes have the needed number of
+ physical CPUs. In case any node has fewer CPUs or we cannot get the
+ information from the node, this function raises an OpPrereqError
+ exception.
+
+ @type lu: C{LogicalUnit}
+ @param lu: a logical unit from which we get configuration data
+ @type nodenames: C{list}
+ @param nodenames: the list of node names to check
+ @type requested: C{int}
+ @param requested: the minimum acceptable number of physical CPUs
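+ @type hypervisor_name: C{str}
+ @param hypervisor_name: the hypervisor to query for CPU information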
+ @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
+ or we cannot check the node
+
+ """
+ nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_name)
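+ # the None stands for the volume group, which is irrelevant when we
+ # only need CPU information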
+ for node in nodenames:
+ info = nodeinfo[node]
+ info.Raise("Cannot get current information from node %s" % node,
+ prereq=True, ecode=errors.ECODE_ENVIRON)
+ num_cpus = info.payload.get("cpu_total", None)
+ if not isinstance(num_cpus, int):
+ raise errors.OpPrereqError("Can't compute the number of physical CPUs"
+ " on node %s, result was '%s'" %
+ (node, num_cpus), errors.ECODE_ENVIRON)
+ if requested > num_cpus:
+ raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
+ "required" % (node, num_cpus, requested),
+ errors.ECODE_NORES)
+
+
class LUInstanceStartup(LogicalUnit):
"""Starts an instance.
"""Logical unit for querying instances.
"""
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
@ivar shutdown_timeout: In case of failover, the timeout for the shutdown
"""
+
+ # Constants
+ _MIGRATION_POLL_INTERVAL = 1 # seconds
+ _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
+
def __init__(self, lu, instance_name, cleanup=False,
failover=False, fallback=False,
ignore_consistency=False,
"""
instance = self.instance
target_node = self.target_node
+ source_node = self.source_node
migration_info = self.migration_info
- abort_result = self.rpc.call_finalize_migration(target_node,
- instance,
- migration_info,
- False)
+ abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
+ instance,
+ migration_info,
+ False)
abort_msg = abort_result.fail_msg
if abort_msg:
logging.error("Aborting migration failed on target node %s: %s",
# Don't raise an exception here, as we still have to try to revert the
# disk status, even if this step failed.
+ abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
+ instance, False, self.live)
+ abort_msg = abort_result.fail_msg
+ if abort_msg:
+ logging.error("Aborting migration failed on source node %s: %s",
+ source_node, abort_msg)
+
def _ExecMigration(self):
"""Migrate an instance.
target_node = self.target_node
source_node = self.source_node
+ # Check for hypervisor version mismatch and warn the user.
+ nodeinfo = self.rpc.call_node_info([source_node, target_node],
+ None, self.instance.hypervisor)
+ src_info = nodeinfo[source_node]
+ dst_info = nodeinfo[target_node]
+
+ if ((constants.HV_NODEINFO_KEY_VERSION in src_info.payload) and
+ (constants.HV_NODEINFO_KEY_VERSION in dst_info.payload)):
+ src_version = src_info.payload[constants.HV_NODEINFO_KEY_VERSION]
+ dst_version = dst_info.payload[constants.HV_NODEINFO_KEY_VERSION]
+ if src_version != dst_version:
+ self.feedback_fn("* warning: hypervisor version mismatch between"
+ " source (%s) and target (%s) node" %
+ (src_version, dst_version))
+
self.feedback_fn("* checking disk consistency between source and target")
for dev in instance.disks:
if not _CheckDiskConsistency(self.lu, dev, target_node, False):
raise errors.OpExecError("Could not migrate instance %s: %s" %
(instance.name, msg))
+ self.feedback_fn("* starting memory transfer")
+ last_feedback = time.time()
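+ # poll the migration status reported by the hypervisor until the
+ # transfer either completes or fails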
+ while True:
+ result = self.rpc.call_instance_get_migration_status(source_node,
+ instance)
+ msg = result.fail_msg
+ ms = result.payload # MigrationStatus instance
+ if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
+ logging.error("Instance migration failed, trying to revert"
+ " disk status: %s", msg)
+ self.feedback_fn("Migration failed, aborting")
+ self._AbortMigration()
+ self._RevertDiskStatus()
+ raise errors.OpExecError("Could not migrate instance %s: %s" %
+ (instance.name, msg))
+
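+ # at this point any status other than "active" means the memory
+ # transfer has finished (failures were handled above)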
+ if ms.status != constants.HV_MIGRATION_ACTIVE:
+ self.feedback_fn("* memory transfer complete")
+ break
+
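+ # throttle progress feedback; transferred_ram can be None for
+ # hypervisors that do not report transfer statistics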
+ if (utils.TimeoutExpired(last_feedback,
+ self._MIGRATION_FEEDBACK_INTERVAL) and
+ ms.transferred_ram is not None):
+ mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
+ self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
+ last_feedback = time.time()
+
+ time.sleep(self._MIGRATION_POLL_INTERVAL)
+
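+ # the transfer has completed; ask the source node to finalize its
+ # side of the migration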
+ result = self.rpc.call_instance_finalize_migration_src(source_node,
+ instance,
+ True,
+ self.live)
+ msg = result.fail_msg
+ if msg:
+ logging.error("Instance migration succeeded, but finalization failed"
+ " on the source node: %s", msg)
+ raise errors.OpExecError("Could not finalize instance migration: %s" %
+ msg)
+
instance.primary_node = target_node
+
# distribute new instance config to the other nodes
self.cfg.Update(instance, self.feedback_fn)
- result = self.rpc.call_finalize_migration(target_node,
- instance,
- migration_info,
- True)
+ result = self.rpc.call_instance_finalize_migration_dst(target_node,
+ instance,
+ migration_info,
+ True)
msg = result.fail_msg
if msg:
- logging.error("Instance migration succeeded, but finalization failed:"
- " %s", msg)
+ logging.error("Instance migration succeeded, but finalization failed"
+ " on the target node: %s", msg)
raise errors.OpExecError("Could not finalize instance migration: %s" %
msg)
if einfo.has_option(constants.INISECT_INS, "disk_template"):
self.op.disk_template = einfo.get(constants.INISECT_INS,
"disk_template")
+ if self.op.disk_template not in constants.DISK_TEMPLATES:
+ raise errors.OpPrereqError("Disk template specified in configuration"
+ " file is not one of the allowed values:"
+ " %s" % " ".join(constants.DISK_TEMPLATES),
+ errors.ECODE_INVAL)
else:
raise errors.OpPrereqError("No disk template specified and the export"
" is missing the disk_template information",
errors.ECODE_INVAL)
if not self.op.disks:
- if einfo.has_option(constants.INISECT_INS, "disk_count"):
- disks = []
- # TODO: import the disk iv_name too
- for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
+ disks = []
+ # TODO: import the disk iv_name too
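+ # look for per-disk size entries rather than relying on a
+ # "disk_count" field, so exports lacking that field still work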
+ for idx in range(constants.MAX_DISKS):
+ if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
disks.append({constants.IDISK_SIZE: disk_sz})
- self.op.disks = disks
- else:
+ self.op.disks = disks
+ if not disks and self.op.disk_template != constants.DT_DISKLESS:
raise errors.OpPrereqError("No disk info specified and the export"
" is missing the disk information",
errors.ECODE_INVAL)
- if (not self.op.nics and
- einfo.has_option(constants.INISECT_INS, "nic_count")):
+ if not self.op.nics:
nics = []
- for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
- ndict = {}
- for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
- v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
- ndict[name] = v
- nics.append(ndict)
+ for idx in range(constants.MAX_NICS):
+ if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
+ ndict = {}
+ for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
+ v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
+ ndict[name] = v
+ nics.append(ndict)
+ else:
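+ # NIC entries are expected to be numbered consecutively; stop at
+ # the first gap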
+ break
self.op.nics = nics
if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
joinargs.append(self.op.instance_name)
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
self.instance_file_storage_dir = utils.PathJoin(*joinargs)
def CheckPrereq(self):
raise errors.OpPrereqError("Cluster does not support lvm-based"
" instances", errors.ECODE_STATE)
- if self.op.hypervisor is None:
+ if (self.op.hypervisor is None or
+ self.op.hypervisor == constants.VALUE_AUTO):
self.op.hypervisor = self.cfg.GetHypervisorType()
cluster = self.cfg.GetClusterInfo()
_CheckGlobalHvParams(self.op.hvparams)
# fill and remember the beparams dict
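+ # replace "auto" values with the cluster defaults before the type
+ # check below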
+ default_beparams = cluster.beparams[constants.PP_DEFAULT]
+ for param, value in self.op.beparams.iteritems():
+ if value == constants.VALUE_AUTO:
+ self.op.beparams[param] = default_beparams[param]
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
self.be_full = cluster.SimpleFillBE(self.op.beparams)
for idx, nic in enumerate(self.op.nics):
nic_mode_req = nic.get(constants.INIC_MODE, None)
nic_mode = nic_mode_req
- if nic_mode is None:
+ if nic_mode is None or nic_mode == constants.VALUE_AUTO:
nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
# in routed mode, for the first nic, the default ip is 'auto'
# Build nic parameters
link = nic.get(constants.INIC_LINK, None)
+ if link == constants.VALUE_AUTO:
+ link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
nicparams = {}
if nic_mode_req:
- nicparams[constants.NIC_MODE] = nic_mode_req
+ nicparams[constants.NIC_MODE] = nic_mode
if link:
nicparams[constants.NIC_LINK] = link
self.disks.append(new_disk)
if self.op.mode == constants.INSTANCE_IMPORT:
-
- # Check that the new instance doesn't have less disks than the export
- instance_disks = len(self.disks)
- export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
- if instance_disks < export_disks:
- raise errors.OpPrereqError("Not enough disks to import."
- " (instance: %d, export: %d)" %
- (instance_disks, export_disks),
- errors.ECODE_INVAL)
-
disk_images = []
- for idx in range(export_disks):
+ for idx in range(len(self.disks)):
option = "disk%d_dump" % idx
if export_info.has_option(constants.INISECT_INS, option):
# FIXME: are the old os-es, disk sizes, etc. useful?
self.src_images = disk_images
old_name = export_info.get(constants.INISECT_INS, "name")
- try:
- exp_nic_count = export_info.getint(constants.INISECT_INS, "nic_count")
- except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid export file, nic_count is not"
- " an integer: %s" % str(err),
- errors.ECODE_STATE)
if self.op.instance_name == old_name:
for idx, nic in enumerate(self.nics):
- if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
+ if nic.mac == constants.VALUE_AUTO:
nic_mac_ini = "nic%d_mac" % idx
nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
feedback_fn("* running the instance OS create scripts...")
# FIXME: pass debug option from opcode to backend
- result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
- self.op.debug_level)
+ os_add_result = \
+ self.rpc.call_instance_os_add(pnode_name, iobj, False,
+ self.op.debug_level)
if pause_sync:
feedback_fn("* resuming disk sync")
result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
logging.warn("resume-sync of instance %s for disk %d failed",
instance, idx)
- result.Raise("Could not add os for instance %s"
- " on node %s" % (instance, pnode_name))
+ os_add_result.Raise("Could not add os for instance %s"
+ " on node %s" % (instance, pnode_name))
elif self.op.mode == constants.INSTANCE_IMPORT:
feedback_fn("* running the instance OS import scripts...")
self.lu.LogWarning("Can't remove old LV: %s" % msg,
hint="remove unused LVs manually")
- def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable-msg=W0613
+ def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
"""Replace a disk on the primary or secondary for DRBD 8.
The algorithm for replace is quite complicated:
"""
steps_total = 6
+ pnode = self.instance.primary_node
+
# Step: check device activation
self.lu.LogStep(1, steps_total, "Check device existence")
self._CheckDisksExistence([self.instance.primary_node])
" soon as possible"))
self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
- result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
- self.node_secondary_ip,
- self.instance.disks)\
- [self.instance.primary_node]
+ result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
+ self.instance.disks)[pnode]
msg = result.fail_msg
if msg:
# local check
hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
_CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
- self.hv_new = hv_new # the new actual values
+ self.hv_proposed = self.hv_new = hv_new # the new actual values
self.hv_inst = i_hvdict # the new dict (without defaults)
else:
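+ # even if unchanged, compute the effective (filled) hypervisor
+ # parameters, as the CPU mask check below needs them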
+ self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
+ instance.hvparams)
self.hv_new = self.hv_inst = {}
# beparams processing
use_none=True)
utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
be_new = cluster.SimpleFillBE(i_bedict)
- self.be_new = be_new # the new actual values
+ self.be_proposed = self.be_new = be_new # the new actual values
self.be_inst = i_bedict # the new dict (without defaults)
else:
self.be_new = self.be_inst = {}
+ self.be_proposed = cluster.SimpleFillBE(instance.beparams)
be_old = cluster.FillBE(instance)
+ # CPU param validation -- checking every time a parameter is
+ # changed to cover all cases where either CPU mask or vcpus have
+ # changed
+ if (constants.BE_VCPUS in self.be_proposed and
+ constants.HV_CPU_MASK in self.hv_proposed):
+ cpu_list = \
+ utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
+ # Verify mask is consistent with number of vCPUs. Can skip this
+ # test if only 1 entry in the CPU mask, which means same mask
+ # is applied to all vCPUs.
+ if (len(cpu_list) > 1 and
+ len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
+ raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
+ " CPU mask [%s]" %
+ (self.be_proposed[constants.BE_VCPUS],
+ self.hv_proposed[constants.HV_CPU_MASK]),
+ errors.ECODE_INVAL)
+
+ # Only perform this test if a new CPU mask is given
+ if constants.HV_CPU_MASK in self.hv_new:
+ # Calculate the largest CPU number requested
+ max_requested_cpu = max(map(max, cpu_list))
+ # Check that all of the instance's nodes have enough physical CPUs to
+ # satisfy the requested CPU mask
+ _CheckNodesPhysicalCPUs(self, instance.all_nodes,
+ max_requested_cpu + 1, instance.hypervisor)
+
# osparams processing
if self.op.osparams:
i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
return ResultWithJobs(jobs)
-class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
+class TagsLU(NoHooksLU): # pylint: disable=W0223
"""Generic tags LU.
This is an abstract class which is the parent of all the other tags LUs.
# Wait for client to close
try:
try:
- # pylint: disable-msg=E1101
+ # pylint: disable=E1101
# Instance of '_socketobject' has no ... member
conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
conn.recv(1)
easy usage
"""
- # pylint: disable-msg=R0902
+ # pylint: disable=R0902
# lots of instance attributes
def __init__(self, cfg, rpc, mode, **kwargs):
_STRING_LIST = ht.TListOf(ht.TString)
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
- # pylint: disable-msg=E1101
+ # pylint: disable=E1101
# Class '...' has no 'OP_ID' member
"OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
opcodes.OpInstanceMigrate.OP_ID,