X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/b87165962822a34998f999e8a5223a8abf990eef..5627f3753b4793a77be266d75a3aa9dcf0d3feab:/lib/cmdlib.py?ds=sidebyside diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 7ed082e..afffbfb 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -21,7 +21,10 @@ """Module implementing the master-side code.""" -# pylint: disable-msg=W0613,W0201 +# pylint: disable-msg=W0201 + +# W0201 since most LU attributes are defined in CheckPrereq or similar +# functions import os import os.path @@ -87,11 +90,15 @@ class LogicalUnit(object): self.recalculate_locks = {} self.__ssh = None # logging - self.LogWarning = processor.LogWarning - self.LogInfo = processor.LogInfo - self.LogStep = processor.LogStep + self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103 + self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103 + self.LogStep = processor.LogStep # pylint: disable-msg=C0103 # support for dry-run self.dry_run_result = None + # support for generic debug attribute + if (not hasattr(self.op, "debug_level") or + not isinstance(self.op.debug_level, int)): + self.op.debug_level = 0 # Tasklets self.tasklets = None @@ -277,6 +284,9 @@ class LogicalUnit(object): and hook results """ + # API must be kept, thus we ignore the unused argument and could + # be a function warnings + # pylint: disable-msg=W0613,R0201 return lu_result def _ExpandAndLockInstance(self): @@ -294,12 +304,9 @@ class LogicalUnit(object): else: assert locking.LEVEL_INSTANCE not in self.needed_locks, \ "_ExpandAndLockInstance called with instance-level locks set" - expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name) - if expanded_name is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name, errors.ECODE_NOENT) - self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name - self.op.instance_name = expanded_name + self.op.instance_name = _ExpandInstanceName(self.cfg, + self.op.instance_name) + self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name def _LockInstancesNodes(self, primary_only=False): """Helper function to declare instances' nodes for locking. @@ -347,7 +354,7 @@ class LogicalUnit(object): del self.recalculate_locks[locking.LEVEL_NODE] -class NoHooksLU(LogicalUnit): +class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223 """Simple LU which runs no hooks. This LU is intended as a parent for other LogicalUnits which will @@ -357,6 +364,14 @@ class NoHooksLU(LogicalUnit): HPATH = None HTYPE = None + def BuildHooksEnv(self): + """Empty BuildHooksEnv for NoHooksLu. + + This just raises an error. + + """ + assert False, "BuildHooksEnv called for NoHooksLUs" + class Tasklet: """Tasklet base class. 
@@ -413,7 +428,7 @@ def _GetWantedNodes(lu, nodes): @param nodes: list of node names or None for all nodes @rtype: list @return: the list of nodes, sorted - @raise errors.OpProgrammerError: if the nodes parameter is wrong type + @raise errors.ProgrammerError: if the nodes parameter is wrong type """ if not isinstance(nodes, list): @@ -424,14 +439,7 @@ def _GetWantedNodes(lu, nodes): raise errors.ProgrammerError("_GetWantedNodes should only be called with a" " non-empty list of nodes whose name is to be expanded.") - wanted = [] - for name in nodes: - node = lu.cfg.ExpandNodeName(name) - if node is None: - raise errors.OpPrereqError("No such node name '%s'" % name, - errors.ECODE_NOENT) - wanted.append(node) - + wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes] return utils.NiceSort(wanted) @@ -453,15 +461,7 @@ def _GetWantedInstances(lu, instances): errors.ECODE_INVAL) if instances: - wanted = [] - - for name in instances: - instance = lu.cfg.ExpandInstanceName(name) - if instance is None: - raise errors.OpPrereqError("No such instance name '%s'" % name, - errors.ECODE_NOENT) - wanted.append(instance) - + wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances] else: wanted = utils.NiceSort(lu.cfg.GetInstanceList()) return wanted @@ -511,7 +511,7 @@ def _CheckGlobalHvParams(params): if used_globals: msg = ("The following hypervisor parameters are global and cannot" " be customized at instance level, please modify them at" - " cluster level: %s" % ", ".join(used_globals)) + " cluster level: %s" % utils.CommaJoin(used_globals)) raise errors.OpPrereqError(msg, errors.ECODE_INVAL) @@ -541,6 +541,33 @@ def _CheckNodeNotDrained(lu, node): errors.ECODE_INVAL) +def _ExpandItemName(fn, name, kind): + """Expand an item name. + + @param fn: the function to use for expansion + @param name: requested item name + @param kind: text description ('Node' or 'Instance') + @return: the resolved (full) name + @raise errors.OpPrereqError: if the item is not found + + """ + full_name = fn(name) + if full_name is None: + raise errors.OpPrereqError("%s '%s' not known" % (kind, name), + errors.ECODE_NOENT) + return full_name + + +def _ExpandNodeName(cfg, name): + """Wrapper over L{_ExpandItemName} for nodes.""" + return _ExpandItemName(cfg.ExpandNodeName, name, "Node") + + +def _ExpandInstanceName(cfg, name): + """Wrapper over L{_ExpandItemName} for instance.""" + return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance") + + def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, memory, vcpus, nics, disk_template, disks, bep, hvp, hypervisor_name): @@ -688,7 +715,7 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None): } if override: args.update(override) - return _BuildInstanceHookEnv(**args) + return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142 def _AdjustCandidatePool(lu, exceptions): @@ -698,7 +725,7 @@ def _AdjustCandidatePool(lu, exceptions): mod_list = lu.cfg.MaintainCandidatePool(exceptions) if mod_list: lu.LogInfo("Promoted nodes to master candidate role: %s", - ", ".join(node.name for node in mod_list)) + utils.CommaJoin(node.name for node in mod_list)) for name in mod_list: lu.context.ReaddNode(name) mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions) @@ -898,6 +925,7 @@ class LUDestroyCluster(LogicalUnit): try: hm.RunPhase(constants.HOOKS_PHASE_POST, [master]) except: + # pylint: disable-msg=W0702 self.LogWarning("Errors occurred running hooks on %s" % master) result = self.rpc.call_node_stop_master(master, False) @@ 
-944,6 +972,7 @@ class LUVerifyCluster(LogicalUnit): ENODESSH = (TNODE, "ENODESSH") ENODEVERSION = (TNODE, "ENODEVERSION") ENODESETUP = (TNODE, "ENODESETUP") + ENODETIME = (TNODE, "ENODETIME") ETYPE_FIELD = "code" ETYPE_ERROR = "ERROR" @@ -1017,7 +1046,7 @@ class LUVerifyCluster(LogicalUnit): """ node = nodeinfo.name - _ErrorIf = self._ErrorIf + _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 # main result, node_result should be a non-empty dict test = not node_result or not isinstance(node_result, dict) @@ -1151,7 +1180,7 @@ class LUVerifyCluster(LogicalUnit): # check that ':' is not present in PV names, since it's a # special character for lvcreate (denotes the range of PEs to # use on the PV) - for size, pvname, owner_vg in pvlist: + for _, pvname, owner_vg in pvlist: test = ":" in pvname _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV" " '%s' of VG '%s'", pvname, owner_vg) @@ -1164,7 +1193,7 @@ class LUVerifyCluster(LogicalUnit): available on the instance's node. """ - _ErrorIf = self._ErrorIf + _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 node_current = instanceconfig.primary_node node_vol_should = {} @@ -1279,7 +1308,7 @@ class LUVerifyCluster(LogicalUnit): """ self.bad = False - _ErrorIf = self._ErrorIf + _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 verbose = self.op.verbose self._feedback_fn = feedback_fn feedback_fn("* Verifying global settings") @@ -1326,14 +1355,23 @@ class LUVerifyCluster(LogicalUnit): constants.NV_VERSION: None, constants.NV_HVINFO: self.cfg.GetHypervisorType(), constants.NV_NODESETUP: None, + constants.NV_TIME: None, } + if vg_name is not None: node_verify_param[constants.NV_VGLIST] = None node_verify_param[constants.NV_LVLIST] = vg_name node_verify_param[constants.NV_PVLIST] = [vg_name] node_verify_param[constants.NV_DRBDLIST] = None + + # Due to the way our RPC system works, exact response times cannot be + # guaranteed (e.g. a broken node could run into a timeout). By keeping the + # time before and after executing the request, we can at least have a time + # window. 
+ nvinfo_starttime = time.time() all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param, self.cfg.GetClusterName()) + nvinfo_endtime = time.time() cluster = self.cfg.GetClusterInfo() master_node = self.cfg.GetMasterNode() @@ -1380,6 +1418,7 @@ class LUVerifyCluster(LogicalUnit): else: instance = instanceinfo[instance] node_drbd[minor] = (instance.name, instance.admin_up) + self._VerifyNode(node_i, file_names, local_checksums, nresult, master_files, node_drbd, vg_name) @@ -1413,6 +1452,27 @@ class LUVerifyCluster(LogicalUnit): if test: continue + # Node time + ntime = nresult.get(constants.NV_TIME, None) + try: + ntime_merged = utils.MergeTime(ntime) + except (ValueError, TypeError): + _ErrorIf(test, self.ENODETIME, node, "Node returned invalid time") + + if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW): + ntime_diff = abs(nvinfo_starttime - ntime_merged) + elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW): + ntime_diff = abs(ntime_merged - nvinfo_endtime) + else: + ntime_diff = None + + _ErrorIf(ntime_diff is not None, self.ENODETIME, node, + "Node time diverges by at least %0.1fs from master node time", + ntime_diff) + + if ntime_diff is not None: + continue + try: node_info[node] = { "mfree": int(nodeinfo['memory_free']), @@ -1498,7 +1558,7 @@ class LUVerifyCluster(LogicalUnit): # warn that the instance lives on offline nodes _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance, "instance lives on offline node(s) %s", - ", ".join(inst_nodes_offline)) + utils.CommaJoin(inst_nodes_offline)) feedback_fn("* Verifying orphan volumes") self._VerifyOrphanVolumes(node_vol_should, node_volume) @@ -1551,13 +1611,13 @@ class LUVerifyCluster(LogicalUnit): assert hooks_results, "invalid result from hooks" for node_name in hooks_results: - show_node_header = True res = hooks_results[node_name] msg = res.fail_msg test = msg and not res.offline self._ErrorIf(test, self.ENODEHOOKS, node_name, "Communication failure in hooks execution: %s", msg) - if test: + if res.offline or msg: + # No need to investigate payload if node is offline or gave an error. # override manually lu_result here as _ErrorIf only # overrides self.bad lu_result = 1 @@ -1641,7 +1701,7 @@ class LUVerifyDisks(NoHooksLU): continue lvs = node_res.payload - for lv_name, (_, lv_inactive, lv_online) in lvs.items(): + for lv_name, (_, _, lv_online) in lvs.items(): inst = nv_dict.pop((node, lv_name), None) if (not lv_online and inst is not None and inst.name not in res_instances): @@ -1672,10 +1732,7 @@ class LURepairDiskSizes(NoHooksLU): if self.op.instances: self.wanted_names = [] for name in self.op.instances: - full_name = self.cfg.ExpandInstanceName(name) - if full_name is None: - raise errors.OpPrereqError("Instance '%s' not known" % name, - errors.ECODE_NOENT) + full_name = _ExpandInstanceName(self.cfg, name) self.wanted_names.append(full_name) self.needed_locks = { locking.LEVEL_NODE: [], @@ -1797,7 +1854,8 @@ class LURenameCluster(LogicalUnit): "NEW_NAME": self.op.name, } mn = self.cfg.GetMasterNode() - return env, [mn], [mn] + all_nodes = self.cfg.GetNodeList() + return env, [mn], all_nodes def CheckPrereq(self): """Verify that the passed name is a valid one. 
@@ -2015,7 +2073,8 @@ class LUSetClusterParams(LogicalUnit): invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES if invalid_hvs: raise errors.OpPrereqError("Enabled hypervisors contains invalid" - " entries: %s" % " ,".join(invalid_hvs), + " entries: %s" % + utils.CommaJoin(invalid_hvs), errors.ECODE_INVAL) else: self.hv_list = cluster.enabled_hypervisors @@ -2134,7 +2193,7 @@ class LURedistributeConfig(NoHooksLU): _RedistributeAncillaryFiles(self) -def _WaitForSync(lu, instance, oneshot=False, unlock=False): +def _WaitForSync(lu, instance, oneshot=False): """Sleep and poll for an instance's disk to sync. """ @@ -2274,10 +2333,9 @@ class LUDiagnoseOS(NoHooksLU): """ @staticmethod - def _DiagnoseByOS(node_list, rlist): + def _DiagnoseByOS(rlist): """Remaps a per-node return list into an a per-os per-node dictionary - @param node_list: a list with the names of all nodes @param rlist: a map with node names as keys and OS objects as values @rtype: dict @@ -2315,7 +2373,7 @@ class LUDiagnoseOS(NoHooksLU): """ valid_nodes = [node for node in self.cfg.GetOnlineNodeList()] node_data = self.rpc.call_os_diagnose(valid_nodes) - pol = self._DiagnoseByOS(valid_nodes, node_data) + pol = self._DiagnoseByOS(node_data) output = [] calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields) calc_variants = "variants" in self.op.output_fields @@ -2377,8 +2435,11 @@ class LURemoveNode(LogicalUnit): "NODE_NAME": self.op.node_name, } all_nodes = self.cfg.GetNodeList() - if self.op.node_name in all_nodes: + try: all_nodes.remove(self.op.node_name) + except ValueError: + logging.warning("Node %s which is about to be removed not found" + " in the all nodes list", self.op.node_name) return env, all_nodes, all_nodes def CheckPrereq(self): @@ -2392,10 +2453,9 @@ class LURemoveNode(LogicalUnit): Any errors are signaled by raising errors.OpPrereqError. """ - node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name)) - if node is None: - raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name, - errors.ECODE_NOENT) + self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) + node = self.cfg.GetNodeInfo(self.op.node_name) + assert node is not None instance_list = self.cfg.GetInstanceList() @@ -2431,8 +2491,9 @@ class LURemoveNode(LogicalUnit): # Run post hooks on the node before it's removed hm = self.proc.hmclass(self.rpc.call_hooks_runner, self) try: - h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name]) + hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name]) except: + # pylint: disable-msg=W0702 self.LogWarning("Errors occurred running hooks on %s" % node.name) result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup) @@ -2446,6 +2507,7 @@ class LUQueryNodes(NoHooksLU): """Logical unit for querying nodes. 
""" + # pylint: disable-msg=W0142 _OP_REQP = ["output_fields", "names", "use_locking"] REQ_BGL = False @@ -2546,10 +2608,9 @@ class LUQueryNodes(NoHooksLU): inst_fields = frozenset(("pinst_cnt", "pinst_list", "sinst_cnt", "sinst_list")) if inst_fields & frozenset(self.op.output_fields): - instancelist = self.cfg.GetInstanceList() + inst_data = self.cfg.GetAllInstancesInfo() - for instance_name in instancelist: - inst = self.cfg.GetInstanceInfo(instance_name) + for inst in inst_data.values(): if inst.primary_node in node_to_primary: node_to_primary[inst.primary_node].add(inst.name) for secnode in inst.secondary_nodes: @@ -2794,12 +2855,7 @@ class LUModifyNodeStorage(NoHooksLU): REQ_BGL = False def CheckArguments(self): - node_name = self.cfg.ExpandNodeName(self.op.node_name) - if node_name is None: - raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, - errors.ECODE_NOENT) - - self.op.node_name = node_name + self.opnode_name = _ExpandNodeName(self.cfg, self.op.node_name) storage_type = self.op.storage_type if storage_type not in constants.VALID_STORAGE_TYPES: @@ -2977,7 +3033,7 @@ class LUAddNode(LogicalUnit): # later in the procedure; this also means that if the re-add # fails, we are left with a non-offlined, broken node if self.op.readd: - new_node.drained = new_node.offline = False + new_node.drained = new_node.offline = False # pylint: disable-msg=W0201 self.LogInfo("Readding a node, the offline/drained flags were reset") # if we demote the node, we do cleanup later in the procedure new_node.master_candidate = self.master_candidate @@ -3073,11 +3129,7 @@ class LUSetNodeParams(LogicalUnit): REQ_BGL = False def CheckArguments(self): - node_name = self.cfg.ExpandNodeName(self.op.node_name) - if node_name is None: - raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, - errors.ECODE_INVAL) - self.op.node_name = node_name + self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) _CheckBooleanOpField(self.op, 'master_candidate') _CheckBooleanOpField(self.op, 'offline') _CheckBooleanOpField(self.op, 'drained') @@ -3155,7 +3207,7 @@ class LUSetNodeParams(LogicalUnit): # If we're being deofflined/drained, we'll MC ourself if needed if (deoffline_or_drain and not offline_or_drain and not - self.op.master_candidate == True): + self.op.master_candidate == True and not node.master_candidate): self.op.master_candidate = _DecideSelfPromotion(self) if self.op.master_candidate: self.LogInfo("Autopromoting node to master candidate") @@ -3226,12 +3278,8 @@ class LUPowercycleNode(NoHooksLU): REQ_BGL = False def CheckArguments(self): - node_name = self.cfg.ExpandNodeName(self.op.node_name) - if node_name is None: - raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, - errors.ECODE_NOENT) - self.op.node_name = node_name - if node_name == self.cfg.GetMasterNode() and not self.op.force: + self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) + if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force: raise errors.OpPrereqError("The node is the master and the force" " parameter was not set", errors.ECODE_INVAL) @@ -3913,14 +3961,10 @@ class LUReinstallInstance(LogicalUnit): self.op.force_variant = getattr(self.op, "force_variant", False) if self.op.os_type is not None: # OS verification - pnode = self.cfg.GetNodeInfo( - self.cfg.ExpandNodeName(instance.primary_node)) - if pnode is None: - raise errors.OpPrereqError("Primary node '%s' is unknown" % - self.op.pnode, errors.ECODE_NOENT) - result = 
self.rpc.call_os_get(pnode.name, self.op.os_type) + pnode = _ExpandNodeName(self.cfg, instance.primary_node) + result = self.rpc.call_os_get(pnode, self.op.os_type) result.Raise("OS '%s' not in supported OS list for primary node %s" % - (self.op.os_type, pnode.name), + (self.op.os_type, pnode), prereq=True, ecode=errors.ECODE_INVAL) if not self.op.force_variant: _CheckOSVariant(result.payload, self.op.os_type) @@ -3941,7 +3985,9 @@ class LUReinstallInstance(LogicalUnit): _StartInstanceDisks(self, inst, None) try: feedback_fn("Running the instance OS create scripts...") - result = self.rpc.call_instance_os_add(inst.primary_node, inst, True) + # FIXME: pass debug option from opcode to backend + result = self.rpc.call_instance_os_add(inst.primary_node, inst, True, + self.op.debug_level) result.Raise("Could not install OS for instance %s on node %s" % (inst.name, inst.primary_node)) finally: @@ -4024,7 +4070,7 @@ class LURecreateInstanceDisks(LogicalUnit): """ to_skip = [] - for idx, disk in enumerate(self.instance.disks): + for idx, _ in enumerate(self.instance.disks): if idx not in self.op.disks: # disk idx has not been passed in to_skip.append(idx) continue @@ -4057,11 +4103,10 @@ class LURenameInstance(LogicalUnit): This checks that the instance is in the cluster and is not running. """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name, errors.ECODE_NOENT) + self.op.instance_name = _ExpandInstanceName(self.cfg, + self.op.instance_name) + instance = self.cfg.GetInstanceInfo(self.op.instance_name) + assert instance is not None _CheckNodeOnline(self, instance.primary_node) if instance.admin_up: @@ -4125,7 +4170,7 @@ class LURenameInstance(LogicalUnit): _StartInstanceDisks(self, inst, None) try: result = self.rpc.call_instance_run_rename(inst.primary_node, inst, - old_name) + old_name, self.op.debug_level) msg = result.fail_msg if msg: msg = ("Could not run OS rename script for instance %s on node %s" @@ -4170,7 +4215,8 @@ class LURemoveInstance(LogicalUnit): env = _BuildInstanceHookEnvByObject(self, self.instance) env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout nl = [self.cfg.GetMasterNode()] - return env, nl, nl + nl_post = list(self.instance.all_nodes) + nl + return env, nl, nl_post def CheckPrereq(self): """Check prerequisites. @@ -4219,6 +4265,7 @@ class LUQueryInstances(NoHooksLU): """Logical unit for querying instances. """ + # pylint: disable-msg=W0142 _OP_REQP = ["output_fields", "names", "use_locking"] REQ_BGL = False _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor", @@ -4280,6 +4327,8 @@ class LUQueryInstances(NoHooksLU): """Computes the list of nodes and their attributes. """ + # pylint: disable-msg=R0912 + # way too many branches here all_info = self.cfg.GetAllInstancesInfo() if self.wanted == locking.ALL_SET: # caller didn't specify instance names, so ordering is not important @@ -4526,13 +4575,22 @@ class LUFailoverInstance(LogicalUnit): This runs on master, primary and secondary nodes of the instance. 
""" + instance = self.instance + source_node = instance.primary_node + target_node = instance.secondary_nodes[0] env = { "IGNORE_CONSISTENCY": self.op.ignore_consistency, "SHUTDOWN_TIMEOUT": self.shutdown_timeout, + "OLD_PRIMARY": source_node, + "OLD_SECONDARY": target_node, + "NEW_PRIMARY": target_node, + "NEW_SECONDARY": source_node, } - env.update(_BuildInstanceHookEnvByObject(self, self.instance)) - nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes) - return env, nl, nl + env.update(_BuildInstanceHookEnvByObject(self, instance)) + nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes) + nl_post = list(nl) + nl_post.append(source_node) + return env, nl, nl_post def CheckPrereq(self): """Check prerequisites. @@ -4674,11 +4732,21 @@ class LUMigrateInstance(LogicalUnit): """ instance = self._migrater.instance + source_node = instance.primary_node + target_node = instance.secondary_nodes[0] env = _BuildInstanceHookEnvByObject(self, instance) env["MIGRATE_LIVE"] = self.op.live env["MIGRATE_CLEANUP"] = self.op.cleanup + env.update({ + "OLD_PRIMARY": source_node, + "OLD_SECONDARY": target_node, + "NEW_PRIMARY": target_node, + "NEW_SECONDARY": source_node, + }) nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes) - return env, nl, nl + nl_post = list(nl) + nl_post.append(source_node) + return env, nl, nl_post class LUMoveInstance(LogicalUnit): @@ -4699,10 +4767,7 @@ class LUMoveInstance(LogicalUnit): def ExpandNames(self): self._ExpandAndLockInstance() - target_node = self.cfg.ExpandNodeName(self.op.target_node) - if target_node is None: - raise errors.OpPrereqError("Node '%s' not known" % - self.op.target_node, errors.ECODE_NOENT) + target_node = _ExpandNodeName(self.cfg, self.op.target_node) self.op.target_node = target_node self.needed_locks[locking.LEVEL_NODE] = [target_node] self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND @@ -4752,7 +4817,7 @@ class LUMoveInstance(LogicalUnit): for idx, dsk in enumerate(instance.disks): if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE): raise errors.OpPrereqError("Instance disk %d has a complex layout," - " cannot copy", errors.ECODE_STATE) + " cannot copy" % idx, errors.ECODE_STATE) _CheckNodeOnline(self, target_node) _CheckNodeNotDrained(self, target_node) @@ -4876,10 +4941,7 @@ class LUMigrateNode(LogicalUnit): REQ_BGL = False def ExpandNames(self): - self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name) - if self.op.node_name is None: - raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name, - errors.ECODE_NOENT) + self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) self.needed_locks = { locking.LEVEL_NODE: [self.op.node_name], @@ -4939,11 +5001,9 @@ class TLMigrateInstance(Tasklet): This checks that the instance is in the cluster. """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.instance_name)) - if instance is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.instance_name, errors.ECODE_NOENT) + instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name) + instance = self.cfg.GetInstanceInfo(instance_name) + assert instance is not None if instance.disk_template != constants.DT_DRBD8: raise errors.OpPrereqError("Instance's disk layout is not" @@ -5586,14 +5646,18 @@ class LUCreateInstance(LogicalUnit): "hvparams", "beparams"] REQ_BGL = False - def _ExpandNode(self, node): - """Expands and checks one node name. + def CheckArguments(self): + """Check arguments. 
""" - node_full = self.cfg.ExpandNodeName(node) - if node_full is None: - raise errors.OpPrereqError("Unknown node %s" % node, errors.ECODE_NOENT) - return node_full + # do not require name_check to ease forward/backward compatibility + # for tools + if not hasattr(self.op, "name_check"): + self.op.name_check = True + if self.op.ip_check and not self.op.name_check: + # TODO: make the ip check more flexible and not depend on the name check + raise errors.OpPrereqError("Cannot do ip checks without a name check", + errors.ECODE_INVAL) def ExpandNames(self): """ExpandNames for CreateInstance. @@ -5650,8 +5714,14 @@ class LUCreateInstance(LogicalUnit): #### instance parameters check # instance name verification - hostname1 = utils.GetHostInfo(self.op.instance_name) - self.op.instance_name = instance_name = hostname1.name + if self.op.name_check: + hostname1 = utils.GetHostInfo(self.op.instance_name) + self.op.instance_name = instance_name = hostname1.name + # used in CheckPrereq for ip ping check + self.check_ip = hostname1.ip + else: + instance_name = self.op.instance_name + self.check_ip = None # this is just a preventive check, but someone might still add this # instance in the meantime, and creation will fail at lock-add time @@ -5680,6 +5750,10 @@ class LUCreateInstance(LogicalUnit): if ip is None or ip.lower() == constants.VALUE_NONE: nic_ip = None elif ip.lower() == constants.VALUE_AUTO: + if not self.op.name_check: + raise errors.OpPrereqError("IP address set to auto but name checks" + " have been skipped. Aborting.", + errors.ECODE_INVAL) nic_ip = hostname1.ip else: if not utils.IsValidIP(ip): @@ -5696,16 +5770,14 @@ class LUCreateInstance(LogicalUnit): # MAC address verification mac = nic.get("mac", constants.VALUE_AUTO) if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): - if not utils.IsValidMac(mac.lower()): - raise errors.OpPrereqError("Invalid MAC address specified: %s" % - mac, errors.ECODE_INVAL) - else: - try: - self.cfg.ReserveMAC(mac, self.proc.GetECId()) - except errors.ReservationError: - raise errors.OpPrereqError("MAC address %s already in use" - " in cluster" % mac, - errors.ECODE_NOTUNIQUE) + mac = utils.NormalizeAndValidateMac(mac) + + try: + self.cfg.ReserveMAC(mac, self.proc.GetECId()) + except errors.ReservationError: + raise errors.OpPrereqError("MAC address %s already in use" + " in cluster" % mac, + errors.ECODE_NOTUNIQUE) # bridge verification bridge = nic.get("bridge", None) @@ -5742,14 +5814,11 @@ class LUCreateInstance(LogicalUnit): raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL) try: size = int(size) - except ValueError: + except (TypeError, ValueError): raise errors.OpPrereqError("Invalid disk size '%s'" % size, errors.ECODE_INVAL) self.disks.append({"size": size, "mode": mode}) - # used in CheckPrereq for ip ping check - self.check_ip = hostname1.ip - # file storage checks if (self.op.file_driver and not self.op.file_driver in constants.FILE_DRIVER): @@ -5769,10 +5838,10 @@ class LUCreateInstance(LogicalUnit): if self.op.iallocator: self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET else: - self.op.pnode = self._ExpandNode(self.op.pnode) + self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode) nodelist = [self.op.pnode] if self.op.snode is not None: - self.op.snode = self._ExpandNode(self.op.snode) + self.op.snode = _ExpandNodeName(self.cfg, self.op.snode) nodelist.append(self.op.snode) self.needed_locks[locking.LEVEL_NODE] = nodelist @@ -5792,7 +5861,7 @@ class LUCreateInstance(LogicalUnit): " path requires a source 
node option.", errors.ECODE_INVAL) else: - self.op.src_node = src_node = self._ExpandNode(src_node) + self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node) if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: self.needed_locks[locking.LEVEL_NODE].append(src_node) if not os.path.isabs(src_path): @@ -5843,7 +5912,7 @@ class LUCreateInstance(LogicalUnit): self.op.pnode = ial.nodes[0] self.LogInfo("Selected nodes for instance %s via iallocator %s: %s", self.op.instance_name, self.op.iallocator, - ", ".join(ial.nodes)) + utils.CommaJoin(ial.nodes)) if ial.required_nodes == 2: self.op.snode = ial.nodes[1] @@ -5960,12 +6029,8 @@ class LUCreateInstance(LogicalUnit): nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini) # ENDIF: self.op.mode == constants.INSTANCE_IMPORT - # ip ping checks (we use the same ip that was resolved in ExpandNames) - if self.op.start and not self.op.ip_check: - raise errors.OpPrereqError("Cannot ignore IP address conflicts when" - " adding an instance in start mode", - errors.ECODE_INVAL) + # ip ping checks (we use the same ip that was resolved in ExpandNames) if self.op.ip_check: if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT): raise errors.OpPrereqError("IP %s of instance %s already in use" % @@ -6161,7 +6226,9 @@ class LUCreateInstance(LogicalUnit): if iobj.disk_template != constants.DT_DISKLESS: if self.op.mode == constants.INSTANCE_CREATE: feedback_fn("* running the instance OS create scripts...") - result = self.rpc.call_instance_os_add(pnode_name, iobj, False) + # FIXME: pass debug option from opcode to backend + result = self.rpc.call_instance_os_add(pnode_name, iobj, False, + self.op.debug_level) result.Raise("Could not add os for instance %s" " on node %s" % (instance, pnode_name)) @@ -6170,9 +6237,11 @@ class LUCreateInstance(LogicalUnit): src_node = self.op.src_node src_images = self.src_images cluster_name = self.cfg.GetClusterName() + # FIXME: pass debug option from opcode to backend import_result = self.rpc.call_instance_os_import(pnode_name, iobj, src_node, src_images, - cluster_name) + cluster_name, + self.op.debug_level) msg = import_result.fail_msg if msg: self.LogWarning("Error while importing the disk images for instance" @@ -6260,6 +6329,8 @@ class LUReplaceDisks(LogicalUnit): self.op.remote_node = None if not hasattr(self.op, "iallocator"): self.op.iallocator = None + if not hasattr(self.op, "early_release"): + self.op.early_release = False TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node, self.op.iallocator) @@ -6271,11 +6342,7 @@ class LUReplaceDisks(LogicalUnit): self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET elif self.op.remote_node is not None: - remote_node = self.cfg.ExpandNodeName(self.op.remote_node) - if remote_node is None: - raise errors.OpPrereqError("Node '%s' not known" % - self.op.remote_node, errors.ECODE_NOENT) - + remote_node = _ExpandNodeName(self.cfg, self.op.remote_node) self.op.remote_node = remote_node # Warning: do not remove the locking of the new secondary here @@ -6291,7 +6358,7 @@ class LUReplaceDisks(LogicalUnit): self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode, self.op.iallocator, self.op.remote_node, - self.op.disks) + self.op.disks, False, self.op.early_release) self.tasklets = [self.replacer] @@ -6338,16 +6405,15 @@ class LUEvacuateNode(LogicalUnit): self.op.remote_node = None if not hasattr(self.op, "iallocator"): self.op.iallocator = None + if not hasattr(self.op, "early_release"): + self.op.early_release = False 
TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG, self.op.remote_node, self.op.iallocator) def ExpandNames(self): - self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name) - if self.op.node_name is None: - raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name, - errors.ECODE_NOENT) + self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) self.needed_locks = {} @@ -6356,18 +6422,13 @@ class LUEvacuateNode(LogicalUnit): self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET elif self.op.remote_node is not None: - remote_node = self.cfg.ExpandNodeName(self.op.remote_node) - if remote_node is None: - raise errors.OpPrereqError("Node '%s' not known" % - self.op.remote_node, errors.ECODE_NOENT) - - self.op.remote_node = remote_node + self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node) # Warning: do not remove the locking of the new secondary here # unless DRBD8.AddChildren is changed to work in parallel; # currently it doesn't since parallel invocations of # FindUnusedMinor will conflict - self.needed_locks[locking.LEVEL_NODE] = [remote_node] + self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node] self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND else: @@ -6383,7 +6444,8 @@ class LUEvacuateNode(LogicalUnit): names.append(inst.name) replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG, - self.op.iallocator, self.op.remote_node, []) + self.op.iallocator, self.op.remote_node, [], + True, self.op.early_release) tasklets.append(replacer) self.tasklets = tasklets @@ -6425,7 +6487,7 @@ class TLReplaceDisks(Tasklet): """ def __init__(self, lu, instance_name, mode, iallocator_name, remote_node, - disks): + disks, delay_iallocator, early_release): """Initializes this class. """ @@ -6437,6 +6499,8 @@ class TLReplaceDisks(Tasklet): self.iallocator_name = iallocator_name self.remote_node = remote_node self.disks = disks + self.delay_iallocator = delay_iallocator + self.early_release = early_release # Runtime data self.instance = None @@ -6488,7 +6552,8 @@ class TLReplaceDisks(Tasklet): if len(ial.nodes) != ial.required_nodes: raise errors.OpPrereqError("iallocator '%s' returned invalid number" " of nodes (%s), required %s" % - (len(ial.nodes), ial.required_nodes), + (iallocator_name, + len(ial.nodes), ial.required_nodes), errors.ECODE_FAULT) remote_node_name = ial.nodes[0] @@ -6522,6 +6587,19 @@ class TLReplaceDisks(Tasklet): len(instance.secondary_nodes), errors.ECODE_FAULT) + if not self.delay_iallocator: + self._CheckPrereq2() + + def _CheckPrereq2(self): + """Check prerequisites, second part. + + This function should always be part of CheckPrereq. It was separated and is + now called from Exec because during node evacuation iallocator was only + called with an unmodified cluster model, not taking planned changes into + account. 
+ + """ + instance = self.instance secondary_node = instance.secondary_nodes[0] if self.iallocator_name is None: @@ -6595,6 +6673,14 @@ class TLReplaceDisks(Tasklet): _CheckNodeNotDrained(self.lu, remote_node) + old_node_info = self.cfg.GetNodeInfo(secondary_node) + assert old_node_info is not None + if old_node_info.offline and not self.early_release: + # doesn't make sense to delay the release + self.early_release = True + self.lu.LogInfo("Old secondary %s is offline, automatically enabling" + " early-release mode", secondary_node) + else: raise errors.ProgrammerError("Unhandled disk replace mode (%s)" % self.mode) @@ -6625,12 +6711,15 @@ class TLReplaceDisks(Tasklet): This dispatches the disk replacement to the appropriate handler. """ + if self.delay_iallocator: + self._CheckPrereq2() + if not self.disks: feedback_fn("No disks need replacement") return feedback_fn("Replacing disk(s) %s for %s" % - (", ".join([str(i) for i in self.disks]), self.instance.name)) + (utils.CommaJoin(self.disks), self.instance.name)) activate_disks = (not self.instance.admin_up) @@ -6735,7 +6824,7 @@ class TLReplaceDisks(Tasklet): return iv_names def _CheckDevices(self, node_name, iv_names): - for name, (dev, old_lvs, new_lvs) in iv_names.iteritems(): + for name, (dev, _, _) in iv_names.iteritems(): self.cfg.SetDiskID(dev, node_name) result = self.rpc.call_blockdev_find(node_name, dev) @@ -6751,7 +6840,7 @@ class TLReplaceDisks(Tasklet): raise errors.OpExecError("DRBD device %s is degraded!" % name) def _RemoveOldStorage(self, node_name, iv_names): - for name, (dev, old_lvs, _) in iv_names.iteritems(): + for name, (_, old_lvs, _) in iv_names.iteritems(): self.lu.LogInfo("Remove logical volumes for %s" % name) for lv in old_lvs: @@ -6762,6 +6851,10 @@ class TLReplaceDisks(Tasklet): self.lu.LogWarning("Can't remove old LV: %s" % msg, hint="remove unused LVs manually") + def _ReleaseNodeLock(self, node_name): + """Releases the lock for a given node.""" + self.lu.context.glm.release(locking.LEVEL_NODE, node_name) + def _ExecDrbd8DiskOnly(self, feedback_fn): """Replace a disk on the primary or secondary for DRBD 8. @@ -6872,18 +6965,30 @@ class TLReplaceDisks(Tasklet): self.cfg.Update(self.instance, feedback_fn) + cstep = 5 + if self.early_release: + self.lu.LogStep(cstep, steps_total, "Removing old storage") + cstep += 1 + self._RemoveOldStorage(self.target_node, iv_names) + # WARNING: we release both node locks here, do not do other RPCs + # than WaitForSync to the primary node + self._ReleaseNodeLock([self.target_node, self.other_node]) + # Wait for sync # This can fail as the old devices are degraded and _WaitForSync # does a combined result over all disks, so we don't check its return value - self.lu.LogStep(5, steps_total, "Sync devices") - _WaitForSync(self.lu, self.instance, unlock=True) + self.lu.LogStep(cstep, steps_total, "Sync devices") + cstep += 1 + _WaitForSync(self.lu, self.instance) # Check all devices manually self._CheckDevices(self.instance.primary_node, iv_names) # Step: remove old storage - self.lu.LogStep(6, steps_total, "Removing old storage") - self._RemoveOldStorage(self.target_node, iv_names) + if not self.early_release: + self.lu.LogStep(cstep, steps_total, "Removing old storage") + cstep += 1 + self._RemoveOldStorage(self.target_node, iv_names) def _ExecDrbd8Secondary(self, feedback_fn): """Replace the secondary node for DRBD 8. 
@@ -6946,6 +7051,7 @@ class TLReplaceDisks(Tasklet): if self.instance.primary_node == o_node1: p_minor = o_minor1 else: + assert self.instance.primary_node == o_node2, "Three-node instance?" p_minor = o_minor2 new_alone_id = (self.instance.primary_node, self.new_node, None, @@ -7016,19 +7122,31 @@ class TLReplaceDisks(Tasklet): to_node, msg, hint=("please do a gnt-instance info to see the" " status of disks")) + cstep = 5 + if self.early_release: + self.lu.LogStep(cstep, steps_total, "Removing old storage") + cstep += 1 + self._RemoveOldStorage(self.target_node, iv_names) + # WARNING: we release all node locks here, do not do other RPCs + # than WaitForSync to the primary node + self._ReleaseNodeLock([self.instance.primary_node, + self.target_node, + self.new_node]) # Wait for sync # This can fail as the old devices are degraded and _WaitForSync # does a combined result over all disks, so we don't check its return value - self.lu.LogStep(5, steps_total, "Sync devices") - _WaitForSync(self.lu, self.instance, unlock=True) + self.lu.LogStep(cstep, steps_total, "Sync devices") + cstep += 1 + _WaitForSync(self.lu, self.instance) # Check all devices manually self._CheckDevices(self.instance.primary_node, iv_names) # Step: remove old storage - self.lu.LogStep(6, steps_total, "Removing old storage") - self._RemoveOldStorage(self.target_node, iv_names) + if not self.early_release: + self.lu.LogStep(cstep, steps_total, "Removing old storage") + self._RemoveOldStorage(self.target_node, iv_names) class LURepairNodeStorage(NoHooksLU): @@ -7039,12 +7157,7 @@ class LURepairNodeStorage(NoHooksLU): REQ_BGL = False def CheckArguments(self): - node_name = self.cfg.ExpandNodeName(self.op.node_name) - if node_name is None: - raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, - errors.ECODE_NOENT) - - self.op.node_name = node_name + self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) def ExpandNames(self): self.needed_locks = { @@ -7128,10 +7241,7 @@ class LUGrowDisk(LogicalUnit): "AMOUNT": self.op.amount, } env.update(_BuildInstanceHookEnvByObject(self, self.instance)) - nl = [ - self.cfg.GetMasterNode(), - self.instance.primary_node, - ] + nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl def CheckPrereq(self): @@ -7181,6 +7291,14 @@ class LUGrowDisk(LogicalUnit): self.cfg.SetDiskID(disk, node) result = self.rpc.call_blockdev_grow(node, disk, self.op.amount) result.Raise("Grow request failed to node %s" % node) + + # TODO: Rewrite code to work properly + # DRBD goes into sync mode for a short amount of time after executing the + # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby + # calling "resize" in sync mode fails. Sleeping for a short amount of + # time is a work-around. 
+ time.sleep(5) + disk.RecordGrow(self.op.amount) self.cfg.Update(instance, feedback_fn) if self.op.wait_for_sync: @@ -7208,10 +7326,7 @@ class LUQueryInstanceData(NoHooksLU): if self.op.instances: self.wanted_names = [] for name in self.op.instances: - full_name = self.cfg.ExpandInstanceName(name) - if full_name is None: - raise errors.OpPrereqError("Instance '%s' not known" % name, - errors.ECODE_NOENT) + full_name = _ExpandInstanceName(self.cfg, name) self.wanted_names.append(full_name) self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names else: @@ -7402,7 +7517,7 @@ class LUSetInstanceParams(LogicalUnit): errors.ECODE_INVAL) try: size = int(size) - except ValueError, err: + except (TypeError, ValueError), err: raise errors.OpPrereqError("Invalid disk size parameter: %s" % str(err), errors.ECODE_INVAL) disk_dict['size'] = size @@ -7459,9 +7574,8 @@ class LUSetInstanceParams(LogicalUnit): if 'mac' in nic_dict: nic_mac = nic_dict['mac'] if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): - if not utils.IsValidMac(nic_mac): - raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac, - errors.ECODE_INVAL) + nic_mac = utils.NormalizeAndValidateMac(nic_mac) + if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO: raise errors.OpPrereqError("'auto' is not a valid MAC address when" " modifying an existing nic", @@ -7531,7 +7645,8 @@ class LUSetInstanceParams(LogicalUnit): nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl - def _GetUpdatedParams(self, old_params, update_dict, + @staticmethod + def _GetUpdatedParams(old_params, update_dict, default_values, parameter_types): """Return the new params dict for the given params. @@ -7671,10 +7786,14 @@ class LUSetInstanceParams(LogicalUnit): continue if nic_op != constants.DDM_ADD: # an existing nic + if not instance.nics: + raise errors.OpPrereqError("Invalid NIC index %s, instance has" + " no NICs" % nic_op, + errors.ECODE_INVAL) if nic_op < 0 or nic_op >= len(instance.nics): raise errors.OpPrereqError("Invalid NIC index %s, valid values" " are 0 to %d" % - (nic_op, len(instance.nics)), + (nic_op, len(instance.nics) - 1), errors.ECODE_INVAL) old_nic_params = instance.nics[nic_op].nicparams old_nic_ip = instance.nics[nic_op].ip @@ -7737,7 +7856,7 @@ class LUSetInstanceParams(LogicalUnit): raise errors.OpPrereqError("Disk operations not supported for" " diskless instances", errors.ECODE_INVAL) - for disk_op, disk_dict in self.op.disks: + for disk_op, _ in self.op.disks: if disk_op == constants.DDM_REMOVE: if len(instance.disks) == 1: raise errors.OpPrereqError("Cannot remove the last disk of" @@ -7781,7 +7900,6 @@ class LUSetInstanceParams(LogicalUnit): result = [] instance = self.instance - cluster = self.cluster # disk changes for disk_op, disk_dict in self.op.disks: if disk_op == constants.DDM_REMOVE: @@ -7980,13 +8098,10 @@ class LUExportInstance(LogicalUnit): "Cannot retrieve locked instance %s" % self.op.instance_name _CheckNodeOnline(self, self.instance.primary_node) - self.dst_node = self.cfg.GetNodeInfo( - self.cfg.ExpandNodeName(self.op.target_node)) + self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node) + self.dst_node = self.cfg.GetNodeInfo(self.op.target_node) + assert self.dst_node is not None - if self.dst_node is None: - # This is wrong node name, not a non-locked node - raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node, - errors.ECODE_NOENT) _CheckNodeOnline(self, self.dst_node.name) _CheckNodeNotDrained(self, 
self.dst_node.name) @@ -8067,8 +8182,10 @@ class LUExportInstance(LogicalUnit): feedback_fn("Exporting snapshot %s from %s to %s" % (idx, src_node, dst_node.name)) if dev: + # FIXME: pass debug from opcode to backend result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name, - instance, cluster_name, idx) + instance, cluster_name, + idx, self.op.debug_level) msg = result.fail_msg if msg: self.LogWarning("Could not export disk/%s from node %s to" @@ -8172,7 +8289,7 @@ class LURemoveExport(NoHooksLU): " Domain Name.") -class TagsLU(NoHooksLU): +class TagsLU(NoHooksLU): # pylint: disable-msg=W0223 """Generic tags LU. This is an abstract class which is the parent of all the other tags LUs. @@ -8182,19 +8299,11 @@ class TagsLU(NoHooksLU): def ExpandNames(self): self.needed_locks = {} if self.op.kind == constants.TAG_NODE: - name = self.cfg.ExpandNodeName(self.op.name) - if name is None: - raise errors.OpPrereqError("Invalid node name (%s)" % - (self.op.name,), errors.ECODE_NOENT) - self.op.name = name - self.needed_locks[locking.LEVEL_NODE] = name + self.op.name = _ExpandNodeName(self.cfg, self.op.name) + self.needed_locks[locking.LEVEL_NODE] = self.op.name elif self.op.kind == constants.TAG_INSTANCE: - name = self.cfg.ExpandInstanceName(self.op.name) - if name is None: - raise errors.OpPrereqError("Invalid instance name (%s)" % - (self.op.name,), errors.ECODE_NOENT) - self.op.name = name - self.needed_locks[locking.LEVEL_INSTANCE] = name + self.op.name = _ExpandInstanceName(self.cfg, self.op.name) + self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name def CheckPrereq(self): """Check prerequisites. @@ -8383,6 +8492,8 @@ class IAllocator(object): easy usage """ + # pylint: disable-msg=R0902 + # lots of instance attributes _ALLO_KEYS = [ "mem_size", "disks", "disk_template", "os", "tags", "nics", "vcpus", "hypervisor", @@ -8726,10 +8837,7 @@ class LUTestAllocator(NoHooksLU): if not hasattr(self.op, "name"): raise errors.OpPrereqError("Missing attribute 'name' on opcode input", errors.ECODE_INVAL) - fname = self.cfg.ExpandInstanceName(self.op.name) - if fname is None: - raise errors.OpPrereqError("Instance '%s' not found for relocation" % - self.op.name, errors.ECODE_NOENT) + fname = _ExpandInstanceName(self.cfg, self.op.name) self.op.name = fname self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes else:
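
Editor's note (not part of the patch): the node-time verification added to LUVerifyCluster above records the master's local time immediately before and after the node_verify RPC and only reports a node whose self-reported time falls outside that window widened by the allowed skew, since the RPC round-trip itself takes an unknown amount of time. A minimal standalone sketch of that idea follows; the names MAX_CLOCK_SKEW and check_node_time are illustrative stand-ins, not identifiers from the patch.

import time

MAX_CLOCK_SKEW = 150  # seconds; stand-in for constants.NODE_MAX_CLOCK_SKEW

def check_node_time(node_time, rpc_start, rpc_end, max_skew=MAX_CLOCK_SKEW):
    """Return the minimum divergence in seconds, or None if acceptable.

    The node clock is only flagged when it lies outside the
    [rpc_start - max_skew, rpc_end + max_skew] window, which absorbs
    both the allowed skew and the unknown RPC round-trip time.
    """
    if node_time < rpc_start - max_skew:
        return rpc_start - node_time
    if node_time > rpc_end + max_skew:
        return node_time - rpc_end
    return None

# Usage sketch: pretend the node's clock runs five minutes ahead.
rpc_start = time.time()
node_reported_time = time.time() + 300.0
rpc_end = time.time()

divergence = check_node_time(node_reported_time, rpc_start, rpc_end)
if divergence is not None:
    print("Node time diverges by at least %0.1fs from master time" % divergence)
else:
    print("Node time within the allowed window")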