"""Module implementing the master-side code."""
-# pylint: disable-msg=W0613,W0201
+# pylint: disable-msg=W0201
+
+# W0201 since most LU attributes are defined in CheckPrereq or similar
+# functions
import os
import os.path
self.recalculate_locks = {}
self.__ssh = None
# logging
- self.LogWarning = processor.LogWarning
- self.LogInfo = processor.LogInfo
- self.LogStep = processor.LogStep
+ self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
+ self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
+ self.LogStep = processor.LogStep # pylint: disable-msg=C0103
# support for dry-run
self.dry_run_result = None
and hook results
"""
+  # The API must be kept, thus we ignore the unused-argument and
+  # could-be-a-function pylint warnings
+ # pylint: disable-msg=W0613,R0201
return lu_result
def _ExpandAndLockInstance(self):
del self.recalculate_locks[locking.LEVEL_NODE]
-class NoHooksLU(LogicalUnit):
+class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
"""Simple LU which runs no hooks.
This LU is intended as a parent for other LogicalUnits which will
HPATH = None
HTYPE = None
+ def BuildHooksEnv(self):
+ """Empty BuildHooksEnv for NoHooksLu.
+
+ This just raises an error.
+
+ """
+ assert False, "BuildHooksEnv called for NoHooksLUs"
+
class Tasklet:
"""Tasklet base class.
if used_globals:
msg = ("The following hypervisor parameters are global and cannot"
" be customized at instance level, please modify them at"
- " cluster level: %s" % ", ".join(used_globals))
+ " cluster level: %s" % utils.CommaJoin(used_globals))
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
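(For reference, utils.CommaJoin is assumed to be a thin wrapper that
stringifies each value before joining, which is why it can replace the
bare ", ".join() calls here and below on sequences that may contain
non-string items; a minimal sketch:

def CommaJoin(names):
  """Joins a sequence of values into a comma-separated string (sketch)."""
  return ", ".join(["%s" % val for val in names])
)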
}
if override:
args.update(override)
- return _BuildInstanceHookEnv(**args)
+ return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
def _AdjustCandidatePool(lu, exceptions):
mod_list = lu.cfg.MaintainCandidatePool(exceptions)
if mod_list:
lu.LogInfo("Promoted nodes to master candidate role: %s",
- ", ".join(node.name for node in mod_list))
+ utils.CommaJoin(node.name for node in mod_list))
for name in mod_list:
lu.context.ReaddNode(name)
mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
try:
hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
except:
+ # pylint: disable-msg=W0702
self.LogWarning("Errors occurred running hooks on %s" % master)
result = self.rpc.call_node_stop_master(master, False)
ENODESSH = (TNODE, "ENODESSH")
ENODEVERSION = (TNODE, "ENODEVERSION")
ENODESETUP = (TNODE, "ENODESETUP")
+ ENODETIME = (TNODE, "ENODETIME")
ETYPE_FIELD = "code"
ETYPE_ERROR = "ERROR"
"""
node = nodeinfo.name
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
# main result, node_result should be a non-empty dict
test = not node_result or not isinstance(node_result, dict)
# check that ':' is not present in PV names, since it's a
# special character for lvcreate (denotes the range of PEs to
# use on the PV)
- for size, pvname, owner_vg in pvlist:
+ for _, pvname, owner_vg in pvlist:
test = ":" in pvname
_ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
" '%s' of VG '%s'", pvname, owner_vg)
available on the instance's node.
"""
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
node_current = instanceconfig.primary_node
node_vol_should = {}
"""
self.bad = False
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
verbose = self.op.verbose
self._feedback_fn = feedback_fn
feedback_fn("* Verifying global settings")
constants.NV_VERSION: None,
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
constants.NV_NODESETUP: None,
+ constants.NV_TIME: None,
}
+
if vg_name is not None:
node_verify_param[constants.NV_VGLIST] = None
node_verify_param[constants.NV_LVLIST] = vg_name
node_verify_param[constants.NV_PVLIST] = [vg_name]
node_verify_param[constants.NV_DRBDLIST] = None
+
+ # Due to the way our RPC system works, exact response times cannot be
+ # guaranteed (e.g. a broken node could run into a timeout). By keeping the
+ # time before and after executing the request, we can at least have a time
+ # window.
+ nvinfo_starttime = time.time()
all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
self.cfg.GetClusterName())
+ nvinfo_endtime = time.time()
cluster = self.cfg.GetClusterInfo()
master_node = self.cfg.GetMasterNode()
else:
instance = instanceinfo[instance]
node_drbd[minor] = (instance.name, instance.admin_up)
+
self._VerifyNode(node_i, file_names, local_checksums,
nresult, master_files, node_drbd, vg_name)
if test:
continue
+ # Node time
+ ntime = nresult.get(constants.NV_TIME, None)
+ try:
+ ntime_merged = utils.MergeTime(ntime)
+ except (ValueError, TypeError):
+ _ErrorIf(test, self.ENODETIME, node, "Node returned invalid time")
+
+ if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
+ ntime_diff = abs(nvinfo_starttime - ntime_merged)
+ elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
+ ntime_diff = abs(ntime_merged - nvinfo_endtime)
+ else:
+ ntime_diff = None
+
+ _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
+ "Node time diverges by at least %0.1fs from master node time",
+ ntime_diff)
+
+ if ntime_diff is not None:
+ continue
+
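(For reference: the node answers NV_TIME with what is assumed to be a
(seconds, microseconds) pair, which utils.MergeTime collapses into a
float before the window comparison above. A minimal sketch of that
logic, where the MergeTime semantics and the helper name are
assumptions:

def MergeTime(timetuple):
  """Merges a (seconds, microseconds) pair into a float (sketch)."""
  seconds, microseconds = timetuple
  return float(seconds) + float(microseconds) * 0.000001

def _ComputeClockSkew(remote_time, start_time, end_time, max_skew):
  """Returns the detected skew in seconds, or None if within bounds."""
  if remote_time < start_time - max_skew:
    return abs(start_time - remote_time)
  if remote_time > end_time + max_skew:
    return abs(remote_time - end_time)
  return None
)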
try:
node_info[node] = {
"mfree": int(nodeinfo['memory_free']),
# warn that the instance lives on offline nodes
_ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
"instance lives on offline node(s) %s",
- ", ".join(inst_nodes_offline))
+ utils.CommaJoin(inst_nodes_offline))
feedback_fn("* Verifying orphan volumes")
self._VerifyOrphanVolumes(node_vol_should, node_volume)
assert hooks_results, "invalid result from hooks"
for node_name in hooks_results:
- show_node_header = True
res = hooks_results[node_name]
msg = res.fail_msg
test = msg and not res.offline
self._ErrorIf(test, self.ENODEHOOKS, node_name,
"Communication failure in hooks execution: %s", msg)
- if test:
+ if res.offline or msg:
+ # No need to investigate payload if node is offline or gave an error.
# override manually lu_result here as _ErrorIf only
# overrides self.bad
lu_result = 1
continue
lvs = node_res.payload
- for lv_name, (_, lv_inactive, lv_online) in lvs.items():
+ for lv_name, (_, _, lv_online) in lvs.items():
inst = nv_dict.pop((node, lv_name), None)
if (not lv_online and inst is not None
and inst.name not in res_instances):
"NEW_NAME": self.op.name,
}
mn = self.cfg.GetMasterNode()
- return env, [mn], [mn]
+ all_nodes = self.cfg.GetNodeList()
+ return env, [mn], all_nodes
def CheckPrereq(self):
"""Verify that the passed name is a valid one.
"""
@staticmethod
- def _DiagnoseByOS(node_list, rlist):
+ def _DiagnoseByOS(rlist):
"""Remaps a per-node return list into an a per-os per-node dictionary
- @param node_list: a list with the names of all nodes
@param rlist: a map with node names as keys and OS objects as values
@rtype: dict
"""
valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
node_data = self.rpc.call_os_diagnose(valid_nodes)
- pol = self._DiagnoseByOS(valid_nodes, node_data)
+ pol = self._DiagnoseByOS(node_data)
output = []
calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
calc_variants = "variants" in self.op.output_fields
"NODE_NAME": self.op.node_name,
}
all_nodes = self.cfg.GetNodeList()
- if self.op.node_name in all_nodes:
+ try:
all_nodes.remove(self.op.node_name)
+ except ValueError:
+ logging.warning("Node %s which is about to be removed not found"
+ " in the all nodes list", self.op.node_name)
return env, all_nodes, all_nodes
def CheckPrereq(self):
# Run post hooks on the node before it's removed
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
try:
- h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
+ hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
except:
+ # pylint: disable-msg=W0702
self.LogWarning("Errors occurred running hooks on %s" % node.name)
result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
"""Logical unit for querying nodes.
"""
+ # pylint: disable-msg=W0142
_OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
inst_fields = frozenset(("pinst_cnt", "pinst_list",
"sinst_cnt", "sinst_list"))
if inst_fields & frozenset(self.op.output_fields):
-      instancelist = self.cfg.GetInstanceList()
-      for instance_name in instancelist:
-        inst = self.cfg.GetInstanceInfo(instance_name)
+      inst_data = self.cfg.GetAllInstancesInfo()
+      for inst in inst_data.values():
if inst.primary_node in node_to_primary:
node_to_primary[inst.primary_node].add(inst.name)
for secnode in inst.secondary_nodes:
# later in the procedure; this also means that if the re-add
# fails, we are left with a non-offlined, broken node
if self.op.readd:
- new_node.drained = new_node.offline = False
+ new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
self.LogInfo("Readding a node, the offline/drained flags were reset")
# if we demote the node, we do cleanup later in the procedure
new_node.master_candidate = self.master_candidate
# If we're being deofflined/drained, we'll MC ourself if needed
if (deoffline_or_drain and not offline_or_drain and not
- self.op.master_candidate == True):
+ self.op.master_candidate == True and not node.master_candidate):
self.op.master_candidate = _DecideSelfPromotion(self)
if self.op.master_candidate:
self.LogInfo("Autopromoting node to master candidate")
_StartInstanceDisks(self, inst, None)
try:
feedback_fn("Running the instance OS create scripts...")
- result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
+ # FIXME: pass debug option from opcode to backend
+ result = self.rpc.call_instance_os_add(inst.primary_node, inst, True, 0)
result.Raise("Could not install OS for instance %s on node %s" %
(inst.name, inst.primary_node))
finally:
"""
to_skip = []
- for idx, disk in enumerate(self.instance.disks):
+ for idx, _ in enumerate(self.instance.disks):
if idx not in self.op.disks: # disk idx has not been passed in
to_skip.append(idx)
continue
_StartInstanceDisks(self, inst, None)
try:
result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
- old_name)
+ old_name, 0)
msg = result.fail_msg
if msg:
msg = ("Could not run OS rename script for instance %s on node %s"
"""Logical unit for querying instances.
"""
+ # pylint: disable-msg=W0142
_OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
_SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
"""Computes the list of nodes and their attributes.
"""
+ # pylint: disable-msg=R0912
+ # way too many branches here
all_info = self.cfg.GetAllInstancesInfo()
if self.wanted == locking.ALL_SET:
# caller didn't specify instance names, so ordering is not important
for idx, dsk in enumerate(instance.disks):
if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
raise errors.OpPrereqError("Instance disk %d has a complex layout,"
- " cannot copy", errors.ECODE_STATE)
+ " cannot copy" % idx, errors.ECODE_STATE)
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
"hvparams", "beparams"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check arguments.
+
+ """
+ # do not require name_check to ease forward/backward compatibility
+ # for tools
+ if not hasattr(self.op, "name_check"):
+ self.op.name_check = True
+ if self.op.ip_check and not self.op.name_check:
+ # TODO: make the ip check more flexible and not depend on the name check
+ raise errors.OpPrereqError("Cannot do ip checks without a name check",
+ errors.ECODE_INVAL)
+
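(The hasattr() defaulting above is the usual idiom for accepting
opcodes submitted by older tools; the same pattern recurs in the
early_release hunks below. A generic helper, hypothetical and not part
of this change, would look like:

def _SetOpDefault(op, attr, default):
  """Defaults a missing optional opcode attribute (illustrative only)."""
  if not hasattr(op, attr):
    setattr(op, attr, default)

# e.g.: _SetOpDefault(self.op, "name_check", True)
)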
def _ExpandNode(self, node):
"""Expands and checks one node name.
#### instance parameters check
# instance name verification
- hostname1 = utils.GetHostInfo(self.op.instance_name)
- self.op.instance_name = instance_name = hostname1.name
+ if self.op.name_check:
+ hostname1 = utils.GetHostInfo(self.op.instance_name)
+ self.op.instance_name = instance_name = hostname1.name
+ # used in CheckPrereq for ip ping check
+ self.check_ip = hostname1.ip
+ else:
+ instance_name = self.op.instance_name
+ self.check_ip = None
# this is just a preventive check, but someone might still add this
# instance in the meantime, and creation will fail at lock-add time
if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
elif ip.lower() == constants.VALUE_AUTO:
+ if not self.op.name_check:
+ raise errors.OpPrereqError("IP address set to auto but name checks"
+ " have been skipped. Aborting.",
+ errors.ECODE_INVAL)
nic_ip = hostname1.ip
else:
if not utils.IsValidIP(ip):
# MAC address verification
mac = nic.get("mac", constants.VALUE_AUTO)
if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- if not utils.IsValidMac(mac.lower()):
- raise errors.OpPrereqError("Invalid MAC address specified: %s" %
- mac, errors.ECODE_INVAL)
- else:
- try:
- self.cfg.ReserveMAC(mac, self.proc.GetECId())
- except errors.ReservationError:
- raise errors.OpPrereqError("MAC address %s already in use"
- " in cluster" % mac,
- errors.ECODE_NOTUNIQUE)
+ mac = utils.NormalizeAndValidateMac(mac)
+
+ try:
+ self.cfg.ReserveMAC(mac, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("MAC address %s already in use"
+ " in cluster" % mac,
+ errors.ECODE_NOTUNIQUE)
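(utils.NormalizeAndValidateMac is assumed to fold the previous
lower-casing and validity check into one call that returns the
normalized address and raises OpPrereqError otherwise; roughly:

import re

_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)

def NormalizeAndValidateMac(mac):
  """Validates a MAC address; returns it lower-cased (sketch)."""
  if not _MAC_CHECK.match(mac):
    # errors.OpPrereqError/ECODE_INVAL as used elsewhere in this module
    raise errors.OpPrereqError("Invalid MAC address specified: %s" % mac,
                               errors.ECODE_INVAL)
  return mac.lower()
)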
# bridge verification
bridge = nic.get("bridge", None)
raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
try:
size = int(size)
- except ValueError:
+ except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
self.disks.append({"size": size, "mode": mode})
- # used in CheckPrereq for ip ping check
- self.check_ip = hostname1.ip
-
# file storage checks
if (self.op.file_driver and
not self.op.file_driver in constants.FILE_DRIVER):
self.op.pnode = ial.nodes[0]
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
- ", ".join(ial.nodes))
+ utils.CommaJoin(ial.nodes))
if ial.required_nodes == 2:
self.op.snode = ial.nodes[1]
nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
# ENDIF: self.op.mode == constants.INSTANCE_IMPORT
- # ip ping checks (we use the same ip that was resolved in ExpandNames)
- if self.op.start and not self.op.ip_check:
- raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
- " adding an instance in start mode",
- errors.ECODE_INVAL)
+ # ip ping checks (we use the same ip that was resolved in ExpandNames)
if self.op.ip_check:
if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("IP %s of instance %s already in use" %
if iobj.disk_template != constants.DT_DISKLESS:
if self.op.mode == constants.INSTANCE_CREATE:
feedback_fn("* running the instance OS create scripts...")
- result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
+ # FIXME: pass debug option from opcode to backend
+ result = self.rpc.call_instance_os_add(pnode_name, iobj, False, 0)
result.Raise("Could not add os for instance %s"
" on node %s" % (instance, pnode_name))
src_node = self.op.src_node
src_images = self.src_images
cluster_name = self.cfg.GetClusterName()
+ # FIXME: pass debug option from opcode to backend
import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
src_node, src_images,
- cluster_name)
+ cluster_name, 0)
msg = import_result.fail_msg
if msg:
self.LogWarning("Error while importing the disk images for instance"
self.op.remote_node = None
if not hasattr(self.op, "iallocator"):
self.op.iallocator = None
+ if not hasattr(self.op, "early_release"):
+ self.op.early_release = False
TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
self.op.iallocator)
self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node,
- self.op.disks)
+ self.op.disks, False, self.op.early_release)
self.tasklets = [self.replacer]
self.op.remote_node = None
if not hasattr(self.op, "iallocator"):
self.op.iallocator = None
+ if not hasattr(self.op, "early_release"):
+ self.op.early_release = False
TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
self.op.remote_node,
names.append(inst.name)
replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
- self.op.iallocator, self.op.remote_node, [])
+ self.op.iallocator, self.op.remote_node, [],
+ True, self.op.early_release)
tasklets.append(replacer)
self.tasklets = tasklets
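(The delay_iallocator flag passed above only has an effect because of
how tasklet-based LUs are assumed to be driven by the processor: two
passes over self.tasklets, so postponed checks run in the second pass.
A sketch, with the function name hypothetical:

def _RunTasklets(lu, feedback_fn):
  """Illustrative sketch of the processor's tasklet dispatch."""
  for tl in lu.tasklets:
    tl.CheckPrereq()      # may postpone checks when delay_iallocator is set
  for tl in lu.tasklets:
    tl.Exec(feedback_fn)  # postponed checks run via _CheckPrereq2 here
)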
"""
def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
- disks):
+ disks, delay_iallocator, early_release):
"""Initializes this class.
"""
self.iallocator_name = iallocator_name
self.remote_node = remote_node
self.disks = disks
+ self.delay_iallocator = delay_iallocator
+ self.early_release = early_release
# Runtime data
self.instance = None
if len(ial.nodes) != ial.required_nodes:
raise errors.OpPrereqError("iallocator '%s' returned invalid number"
" of nodes (%s), required %s" %
- (len(ial.nodes), ial.required_nodes),
+ (iallocator_name,
+ len(ial.nodes), ial.required_nodes),
errors.ECODE_FAULT)
remote_node_name = ial.nodes[0]
len(instance.secondary_nodes),
errors.ECODE_FAULT)
+ if not self.delay_iallocator:
+ self._CheckPrereq2()
+
+ def _CheckPrereq2(self):
+ """Check prerequisites, second part.
+
+ This function should always be part of CheckPrereq. It was separated and is
+    now called from Exec because during node evacuation the iallocator would
+    otherwise only be called with an unmodified cluster model, not taking
+    planned changes into account.
+
+ """
+ instance = self.instance
secondary_node = instance.secondary_nodes[0]
if self.iallocator_name is None:
This dispatches the disk replacement to the appropriate handler.
"""
+ if self.delay_iallocator:
+ self._CheckPrereq2()
+
if not self.disks:
feedback_fn("No disks need replacement")
return
feedback_fn("Replacing disk(s) %s for %s" %
- (", ".join([str(i) for i in self.disks]), self.instance.name))
+ (utils.CommaJoin(self.disks), self.instance.name))
activate_disks = (not self.instance.admin_up)
return iv_names
def _CheckDevices(self, node_name, iv_names):
- for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+ for name, (dev, _, _) in iv_names.iteritems():
self.cfg.SetDiskID(dev, node_name)
result = self.rpc.call_blockdev_find(node_name, dev)
raise errors.OpExecError("DRBD device %s is degraded!" % name)
def _RemoveOldStorage(self, node_name, iv_names):
- for name, (dev, old_lvs, _) in iv_names.iteritems():
+ for name, (_, old_lvs, _) in iv_names.iteritems():
self.lu.LogInfo("Remove logical volumes for %s" % name)
for lv in old_lvs:
self.lu.LogWarning("Can't remove old LV: %s" % msg,
hint="remove unused LVs manually")
+ def _ReleaseNodeLock(self, node_name):
+ """Releases the lock for a given node."""
+ self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
+
def _ExecDrbd8DiskOnly(self, feedback_fn):
"""Replace a disk on the primary or secondary for DRBD 8.
self.cfg.Update(self.instance, feedback_fn)
+ cstep = 5
+ if self.early_release:
+ self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ cstep += 1
+ self._RemoveOldStorage(self.target_node, iv_names)
+ # only release the lock if we're doing secondary replace, since
+ # we use the primary node later
+ if self.target_node != self.instance.primary_node:
+ self._ReleaseNodeLock(self.target_node)
+
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
- self.lu.LogStep(5, steps_total, "Sync devices")
+ self.lu.LogStep(cstep, steps_total, "Sync devices")
+ cstep += 1
_WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
- self.lu.LogStep(6, steps_total, "Removing old storage")
- self._RemoveOldStorage(self.target_node, iv_names)
+ if not self.early_release:
+ self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ cstep += 1
+ self._RemoveOldStorage(self.target_node, iv_names)
def _ExecDrbd8Secondary(self, feedback_fn):
"""Replace the secondary node for DRBD 8.
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
+ assert self.instance.primary_node == o_node2, "Three-node instance?"
p_minor = o_minor2
new_alone_id = (self.instance.primary_node, self.new_node, None,
to_node, msg,
hint=("please do a gnt-instance info to see the"
" status of disks"))
+ cstep = 5
+ if self.early_release:
+ self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ cstep += 1
+ self._RemoveOldStorage(self.target_node, iv_names)
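+      # note: both the old and the new secondary node locks are released
+      # here; the primary node lock is kept for the sync and device checks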
+ self._ReleaseNodeLock([self.target_node, self.new_node])
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
- self.lu.LogStep(5, steps_total, "Sync devices")
+ self.lu.LogStep(cstep, steps_total, "Sync devices")
+ cstep += 1
_WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
- self.lu.LogStep(6, steps_total, "Removing old storage")
- self._RemoveOldStorage(self.target_node, iv_names)
+    if not self.early_release:
+      self.lu.LogStep(cstep, steps_total, "Removing old storage")
+      cstep += 1
+      self._RemoveOldStorage(self.target_node, iv_names)
class LURepairNodeStorage(NoHooksLU):
self.cfg.SetDiskID(disk, node)
result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
result.Raise("Grow request failed to node %s" % node)
+
+ # TODO: Rewrite code to work properly
+ # DRBD goes into sync mode for a short amount of time after executing the
+ # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
+ # calling "resize" in sync mode fails. Sleeping for a short amount of
+ # time is a work-around.
+ time.sleep(5)
+
disk.RecordGrow(self.op.amount)
self.cfg.Update(instance, feedback_fn)
if self.op.wait_for_sync:
errors.ECODE_INVAL)
try:
size = int(size)
- except ValueError, err:
+ except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk size parameter: %s" %
str(err), errors.ECODE_INVAL)
disk_dict['size'] = size
if 'mac' in nic_dict:
nic_mac = nic_dict['mac']
if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- if not utils.IsValidMac(nic_mac):
- raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac,
- errors.ECODE_INVAL)
+ nic_mac = utils.NormalizeAndValidateMac(nic_mac)
+
if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
raise errors.OpPrereqError("'auto' is not a valid MAC address when"
" modifying an existing nic",
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return env, nl, nl
- def _GetUpdatedParams(self, old_params, update_dict,
+ @staticmethod
+ def _GetUpdatedParams(old_params, update_dict,
default_values, parameter_types):
"""Return the new params dict for the given params.
raise errors.OpPrereqError("Disk operations not supported for"
" diskless instances",
errors.ECODE_INVAL)
- for disk_op, disk_dict in self.op.disks:
+ for disk_op, _ in self.op.disks:
if disk_op == constants.DDM_REMOVE:
if len(instance.disks) == 1:
raise errors.OpPrereqError("Cannot remove the last disk of"
result = []
instance = self.instance
- cluster = self.cluster
# disk changes
for disk_op, disk_dict in self.op.disks:
if disk_op == constants.DDM_REMOVE:
feedback_fn("Exporting snapshot %s from %s to %s" %
(idx, src_node, dst_node.name))
if dev:
+ # FIXME: pass debug from opcode to backend
result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
- instance, cluster_name, idx)
+ instance, cluster_name,
+ idx, 0)
msg = result.fail_msg
if msg:
self.LogWarning("Could not export disk/%s from node %s to"
" Domain Name.")
-class TagsLU(NoHooksLU):
+class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
"""Generic tags LU.
This is an abstract class which is the parent of all the other tags LUs.
easy usage
"""
+ # pylint: disable-msg=R0902
+ # lots of instance attributes
_ALLO_KEYS = [
"mem_size", "disks", "disk_template",
"os", "tags", "nics", "vcpus", "hypervisor",