diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 4fd22d1..d951208 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -453,11 +453,11 @@ class _QueryBase:
   #: Attribute holding field definitions
   FIELDS = None
 
-  def __init__(self, names, fields, use_locking):
+  def __init__(self, filter_, fields, use_locking):
     """Initializes this class.
 
     """
-    self.names = names
+    self.names = ReadSimpleFilter("name", filter_)
     self.use_locking = use_locking
 
     self.query = query.Query(self.FIELDS, fields)
@@ -872,7 +872,7 @@ def _NICListToTuple(lu, nics):
   """Build a list of nic information tuples.
 
   This list is suitable to be passed to _BuildInstanceHookEnv or as a return
-  value in LUQueryInstanceData.
+  value in LUInstanceQueryData.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
@@ -1254,8 +1254,8 @@ class LUClusterVerify(LogicalUnit):
     @ivar instances: a list of running instances (runtime)
     @ivar pinst: list of configured primary instances (config)
     @ivar sinst: list of configured secondary instances (config)
-    @ivar sbp: diction of {secondary-node: list of instances} of all peers
-      of this node (config)
+    @ivar sbp: dictionary of {primary-node: list of instances} for all
+      instances for which this node is secondary (config)
     @ivar mfree: free memory, as reported by hypervisor (runtime)
     @ivar dfree: free disk, as reported by the node (runtime)
     @ivar offline: the offline status (config)
@@ -1397,6 +1397,13 @@ class LUClusterVerify(LogicalUnit):
         _ErrorIf(test, self.ENODEHV, node,
                  "hypervisor %s verify failure: '%s'", hv_name, hv_result)
 
+    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
+    if ninfo.vm_capable and isinstance(hvp_result, list):
+      for item, hv_name, hv_result in hvp_result:
+        _ErrorIf(True, self.ENODEHV, node,
+                 "hypervisor %s parameter verify failure (source %s): %s",
+                 hv_name, item, hv_result)
+
     test = nresult.get(constants.NV_NODESETUP,
                        ["Missing NODESETUP results"])
     _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
@@ -1547,7 +1554,7 @@ class LUClusterVerify(LogicalUnit):
              node_current)
 
     for node, n_img in node_image.items():
-      if (not node == node_current):
+      if node != node_current:
         test = instance in n_img.instances
         _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                  "instance should not run on node %s", node)
@@ -1557,7 +1564,11 @@ class LUClusterVerify(LogicalUnit):
                 for idx, (success, status) in enumerate(disks)]
 
     for nname, success, bdev_status, idx in diskdata:
-      _ErrorIf(instanceconfig.admin_up and not success,
+      # the 'ghost node' construction in Exec() ensures that we have a
+      # node here
+      snode = node_image[nname]
+      bad_snode = snode.ghost or snode.offline
+      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
                self.EINSTANCEFAULTYDISK, instance,
                "couldn't retrieve status for disk/%s on %s: %s",
                idx, nname, bdev_status)
@@ -1606,6 +1617,7 @@ class LUClusterVerify(LogicalUnit):
     instances it was primary for.
 
     """
+    cluster_info = self.cfg.GetClusterInfo()
     for node, n_img in node_image.items():
       # This code checks that every node which is now listed as
       # secondary has enough memory to host all instances it is
@@ -1615,10 +1627,16 @@ class LUClusterVerify(LogicalUnit):
       # WARNING: we currently take into account down instances as well
       # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
+      if n_img.offline:
+        # we're skipping offline nodes from the N+1 warning, since
+        # most likely we don't have good memory information from them;
+        # we already list instances living on such nodes, and that's
+        # enough warning
+        continue
       for prinode, instances in n_img.sbp.items():
         needed_mem = 0
         for instance in instances:
-          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
+          bep = cluster_info.FillBE(instance_cfg[instance])
           if bep[constants.BE_AUTO_BALANCE]:
             needed_mem += bep[constants.BE_MEMORY]
         test = n_img.mfree < needed_mem
@@ -2029,6 +2047,21 @@ class LUClusterVerify(LogicalUnit):
 
     return instdisk
 
+  def _VerifyHVP(self, hvp_data):
+    """Verifies locally the syntax of the hypervisor parameters.
+
+    """
+    for item, hv_name, hv_params in hvp_data:
+      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
+             (hv_name, item))
+      try:
+        hv_class = hypervisor.GetHypervisor(hv_name)
+        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+        hv_class.CheckParameterSyntax(hv_params)
+      except errors.GenericError, err:
+        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
+
+
   def BuildHooksEnv(self):
     """Build hooks env.
 
@@ -2094,12 +2127,32 @@ class LUClusterVerify(LogicalUnit):
 
     local_checksums = utils.FingerprintFiles(file_names)
 
+    # Compute the set of hypervisor parameters
+    hvp_data = []
+    for hv_name in hypervisors:
+      hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
+    for os_name, os_hvp in cluster.os_hvp.items():
+      for hv_name, hv_params in os_hvp.items():
+        if not hv_params:
+          continue
+        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
+        hvp_data.append(("os %s" % os_name, hv_name, full_params))
+    # TODO: collapse identical parameter values in a single one
+    for instance in instanceinfo.values():
+      if not instance.hvparams:
+        continue
+      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
+                       cluster.FillHV(instance)))
+    # and verify them locally
+    self._VerifyHVP(hvp_data)
+
     feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
     node_verify_param = {
       constants.NV_FILELIST: file_names,
       constants.NV_NODELIST: [node.name for node in nodeinfo
                               if not node.offline],
       constants.NV_HYPERVISOR: hypervisors,
+      constants.NV_HVPARAMS: hvp_data,
       constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                   node.secondary_ip) for node in nodeinfo
                                  if not node.offline],
@@ -2248,8 +2301,8 @@ class LUClusterVerify(LogicalUnit):
                self.ENODERPC, pnode, "instance %s, connection to"
                " primary node failed", instance)
 
-      if pnode_img.offline:
-        inst_nodes_offline.append(pnode)
+      _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
+               "instance lives on offline node %s", inst_config.primary_node)
 
       # If the instance is non-redundant we cannot survive losing its primary
       # node, so we are not N+1 compliant. On the other hand we have no disk
@@ -2298,7 +2351,7 @@ class LUClusterVerify(LogicalUnit):
 
       # warn that the instance lives on offline nodes
       _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
-               "instance lives on offline node(s) %s",
+               "instance has offline secondary node(s) %s",
                utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
@@ -2405,15 +2458,13 @@ class LUClusterVerifyDisks(NoHooksLU):
     """
     result = res_nodes, res_instances, res_missing = {}, [], {}
 
-    nodes = utils.NiceSort(self.cfg.GetNodeList())
-    instances = [self.cfg.GetInstanceInfo(name)
-                 for name in self.cfg.GetInstanceList()]
+    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
+    instances = self.cfg.GetAllInstancesInfo().values()
 
     nv_dict = {}
     for inst in instances:
       inst_lvs = {}
-      if (not inst.admin_up or
-          inst.disk_template not in constants.DTS_NET_MIRROR):
+      if not inst.admin_up:
         continue
       inst.MapLVsByNode(inst_lvs)
       # transform { iname: {node: [vol,],},} to {(node, vol): iname}
@@ -2424,13 +2475,8 @@ class LUClusterVerifyDisks(NoHooksLU):
     if not nv_dict:
       return result
 
-    vg_names = self.rpc.call_vg_list(nodes)
-    vg_names.Raise("Cannot get list of VGs")
-
-    for node in nodes:
-      # node_volume
-      node_res = self.rpc.call_lv_list([node],
-                                       vg_names[node].payload.keys())[node]
+    node_lvs = self.rpc.call_lv_list(nodes, [])
+    for node, node_res in node_lvs.items():
       if node_res.offline:
         continue
       msg = node_res.fail_msg
@@ -2539,16 +2585,18 @@ class LUClusterRepairDiskSizes(NoHooksLU):
       newl = [v[2].Copy() for v in dskl]
       for dsk in newl:
         self.cfg.SetDiskID(dsk, node)
-      result = self.rpc.call_blockdev_getsizes(node, newl)
+      result = self.rpc.call_blockdev_getsize(node, newl)
       if result.fail_msg:
-        self.LogWarning("Failure in blockdev_getsizes call to node"
+        self.LogWarning("Failure in blockdev_getsize call to node"
                         " %s, ignoring", node)
         continue
-      if len(result.data) != len(dskl):
+      if len(result.payload) != len(dskl):
+        logging.warning("Invalid result from node %s: len(dskl)=%d,"
+                        " result.payload=%s", node, len(dskl), result.payload)
         self.LogWarning("Invalid result from node %s, ignoring node results",
                         node)
         continue
-      for ((instance, idx, disk), size) in zip(dskl, result.data):
+      for ((instance, idx, disk), size) in zip(dskl, result.payload):
         if size is None:
           self.LogWarning("Disk %d of instance %s did not return size"
                           " information, ignoring", idx, instance.name)
@@ -3180,6 +3228,7 @@ class LUOobCommand(NoHooksLU):
 
   """
   REG_BGL = False
+  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -3191,72 +3240,127 @@ class LUOobCommand(NoHooksLU):
     This checks:
      - the node exists in the configuration
      - OOB is supported
 
     Any errors are signaled by raising errors.OpPrereqError.
 
""" - self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) - node = self.cfg.GetNodeInfo(self.op.node_name) + self.nodes = [] + self.master_node = self.cfg.GetMasterNode() + + if self.op.node_names: + if self.op.command in self._SKIP_MASTER: + if self.master_node in self.op.node_names: + master_node_obj = self.cfg.GetNodeInfo(self.master_node) + master_oob_handler = _SupportsOob(self.cfg, master_node_obj) + + if master_oob_handler: + additional_text = ("Run '%s %s %s' if you want to operate on the" + " master regardless") % (master_oob_handler, + self.op.command, + self.master_node) + else: + additional_text = "The master node does not support out-of-band" - if node is None: - raise errors.OpPrereqError("Node %s not found" % self.op.node_name) + raise errors.OpPrereqError(("Operating on the master node %s is not" + " allowed for %s\n%s") % + (self.master_node, self.op.command, + additional_text), errors.ECODE_INVAL) + else: + self.op.node_names = self.cfg.GetNodeList() + if self.op.command in self._SKIP_MASTER: + self.op.node_names.remove(self.master_node) - self.oob_program = _SupportsOob(self.cfg, node) + if self.op.command in self._SKIP_MASTER: + assert self.master_node not in self.op.node_names - if not self.oob_program: - raise errors.OpPrereqError("OOB is not supported for node %s" % - self.op.node_name) + for node_name in self.op.node_names: + node = self.cfg.GetNodeInfo(node_name) - if self.op.command == constants.OOB_POWER_OFF and not node.offline: - raise errors.OpPrereqError(("Cannot power off node %s because it is" - " not marked offline") % self.op.node_name) + if node is None: + raise errors.OpPrereqError("Node %s not found" % node_name, + errors.ECODE_NOENT) + else: + self.nodes.append(node) - self.node = node + if (not self.op.ignore_status and + (self.op.command == constants.OOB_POWER_OFF and not node.offline)): + raise errors.OpPrereqError(("Cannot power off node %s because it is" + " not marked offline") % node_name, + errors.ECODE_STATE) def ExpandNames(self): """Gather locks we need. """ - node_name = _ExpandNodeName(self.cfg, self.op.node_name) + if self.op.node_names: + self.op.node_names = [_ExpandNodeName(self.cfg, name) + for name in self.op.node_names] + lock_names = self.op.node_names + else: + lock_names = locking.ALL_SET + self.needed_locks = { - locking.LEVEL_NODE: [node_name], + locking.LEVEL_NODE: lock_names, } def Exec(self, feedback_fn): """Execute OOB and return result if we expect any. 
""" - master_node = self.cfg.GetMasterNode() - node = self.node + master_node = self.master_node + ret = [] - logging.info("Executing out-of-band command '%s' using '%s' on %s", - self.op.command, self.oob_program, self.op.node_name) - result = self.rpc.call_run_oob(master_node, self.oob_program, - self.op.command, self.op.node_name, - self.op.timeout) + for node in self.nodes: + node_entry = [(constants.RS_NORMAL, node.name)] + ret.append(node_entry) - result.Raise("An error occurred on execution of OOB helper") + oob_program = _SupportsOob(self.cfg, node) - self._CheckPayload(result) - - if self.op.command == constants.OOB_HEALTH: - # For health we should log important events - for item, status in result.payload: - if status in [constants.OOB_STATUS_WARNING, - constants.OOB_STATUS_CRITICAL]: - logging.warning("On node '%s' item '%s' has status '%s'", - self.op.node_name, item, status) - - if self.op.command == constants.OOB_POWER_ON: - node.powered = True - elif self.op.command == constants.OOB_POWER_OFF: - node.powered = False - elif self.op.command == constants.OOB_POWER_STATUS: - powered = result.payload[constants.OOB_POWER_STATUS_POWERED] - if powered != self.node.powered: - logging.warning(("Recorded power state (%s) of node '%s' does not match" - " actual power state (%s)"), node.powered, - self.op.node_name, powered) + if not oob_program: + node_entry.append((constants.RS_UNAVAIL, None)) + continue - self.cfg.Update(node, feedback_fn) + logging.info("Executing out-of-band command '%s' using '%s' on %s", + self.op.command, oob_program, node.name) + result = self.rpc.call_run_oob(master_node, oob_program, + self.op.command, node.name, + self.op.timeout) - return result.payload + if result.fail_msg: + self.LogWarning("On node '%s' out-of-band RPC failed with: %s", + node.name, result.fail_msg) + node_entry.append((constants.RS_NODATA, None)) + else: + try: + self._CheckPayload(result) + except errors.OpExecError, err: + self.LogWarning("The payload returned by '%s' is not valid: %s", + node.name, err) + node_entry.append((constants.RS_NODATA, None)) + else: + if self.op.command == constants.OOB_HEALTH: + # For health we should log important events + for item, status in result.payload: + if status in [constants.OOB_STATUS_WARNING, + constants.OOB_STATUS_CRITICAL]: + self.LogWarning("On node '%s' item '%s' has status '%s'", + node.name, item, status) + + if self.op.command == constants.OOB_POWER_ON: + node.powered = True + elif self.op.command == constants.OOB_POWER_OFF: + node.powered = False + elif self.op.command == constants.OOB_POWER_STATUS: + powered = result.payload[constants.OOB_POWER_STATUS_POWERED] + if powered != node.powered: + logging.warning(("Recorded power state (%s) of node '%s' does not" + " match actual power state (%s)"), node.powered, + node.name, powered) + + # For configuration changing commands we should update the node + if self.op.command in (constants.OOB_POWER_ON, + constants.OOB_POWER_OFF): + self.cfg.Update(node, feedback_fn) + + node_entry.append((constants.RS_NORMAL, result.payload)) + + return ret def _CheckPayload(self, result): """Checks if the payload is valid. 
@@ -3270,10 +3374,11 @@ class LUOobCommand(NoHooksLU): if not isinstance(result.payload, list): errs.append("command 'health' is expected to return a list but got %s" % type(result.payload)) - for item, status in result.payload: - if status not in constants.OOB_STATUSES: - errs.append("health item '%s' has invalid status '%s'" % - (item, status)) + else: + for item, status in result.payload: + if status not in constants.OOB_STATUSES: + errs.append("health item '%s' has invalid status '%s'" % + (item, status)) if self.op.command == constants.OOB_POWER_STATUS: if not isinstance(result.payload, dict): @@ -3295,7 +3400,7 @@ class LUOobCommand(NoHooksLU): -class LUDiagnoseOS(NoHooksLU): +class LUOsDiagnose(NoHooksLU): """Logical unit for OS diagnose/query. """ @@ -3368,7 +3473,9 @@ class LUDiagnoseOS(NoHooksLU): """Compute the list of OSes. """ - valid_nodes = [node for node in self.cfg.GetOnlineNodeList()] + valid_nodes = [node.name + for node in self.cfg.GetAllNodesInfo().values() + if not node.offline and node.vm_capable] node_data = self.rpc.call_os_diagnose(valid_nodes) pol = self._DiagnoseByOS(node_data) output = [] @@ -3429,7 +3536,7 @@ class LUDiagnoseOS(NoHooksLU): return output -class LURemoveNode(LogicalUnit): +class LUNodeRemove(LogicalUnit): """Logical unit for removing a node. """ @@ -3557,7 +3664,10 @@ class _NodeQuery(_QueryBase): # Gather data as requested if query.NQ_LIVE in self.requested_data: - node_data = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(), + # filter out non-vm_capable nodes + toquery_nodes = [name for name in nodenames if all_info[name].vm_capable] + + node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(), lu.cfg.GetHypervisorType()) live_data = dict((name, nresult.payload) for (name, nresult) in node_data.items() @@ -3598,7 +3708,7 @@ class _NodeQuery(_QueryBase): oob_support, lu.cfg.GetClusterInfo()) -class LUQueryNodes(NoHooksLU): +class LUNodeQuery(NoHooksLU): """Logical unit for querying nodes. """ @@ -3606,8 +3716,8 @@ class LUQueryNodes(NoHooksLU): REQ_BGL = False def CheckArguments(self): - self.nq = _NodeQuery(self.op.names, self.op.output_fields, - self.op.use_locking) + self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names), + self.op.output_fields, self.op.use_locking) def ExpandNames(self): self.nq.ExpandNames(self) @@ -3616,7 +3726,7 @@ class LUQueryNodes(NoHooksLU): return self.nq.OldStyleQuery(self) -class LUQueryNodeVolumes(NoHooksLU): +class LUNodeQueryvols(NoHooksLU): """Logical unit for getting volumes on node(s). """ @@ -3694,7 +3804,7 @@ class LUQueryNodeVolumes(NoHooksLU): return output -class LUQueryNodeStorage(NoHooksLU): +class LUNodeQueryStorage(NoHooksLU): """Logical unit for getting information on storage units on node(s). """ @@ -3805,18 +3915,21 @@ class _InstanceQuery(_QueryBase): """Computes the list of instances and their attributes. 
""" + cluster = lu.cfg.GetClusterInfo() all_info = lu.cfg.GetAllInstancesInfo() instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE) instance_list = [all_info[name] for name in instance_names] - nodes = frozenset([inst.primary_node for inst in instance_list]) + nodes = frozenset(itertools.chain(*(inst.all_nodes + for inst in instance_list))) hv_list = list(set([inst.hypervisor for inst in instance_list])) bad_nodes = [] offline_nodes = [] + wrongnode_inst = set() # Gather data as requested - if query.IQ_LIVE in self.requested_data: + if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]): live_data = {} node_data = lu.rpc.call_all_instances_info(nodes, hv_list) for name in nodes: @@ -3828,7 +3941,11 @@ class _InstanceQuery(_QueryBase): if result.fail_msg: bad_nodes.append(name) elif result.payload: - live_data.update(result.payload) + for inst in result.payload: + if all_info[inst].primary_node == name: + live_data.update(result.payload) + else: + wrongnode_inst.add(inst) # else no instance is alive else: live_data = {} @@ -3842,9 +3959,21 @@ class _InstanceQuery(_QueryBase): else: disk_usage = None + if query.IQ_CONSOLE in self.requested_data: + consinfo = {} + for inst in instance_list: + if inst.name in live_data: + # Instance is running + consinfo[inst.name] = _GetInstanceConsole(cluster, inst) + else: + consinfo[inst.name] = None + assert set(consinfo.keys()) == set(instance_names) + else: + consinfo = None + return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(), disk_usage, offline_nodes, bad_nodes, - live_data) + live_data, wrongnode_inst, consinfo) class LUQuery(NoHooksLU): @@ -3856,9 +3985,8 @@ class LUQuery(NoHooksLU): def CheckArguments(self): qcls = _GetQueryImplementation(self.op.what) - names = qlang.ReadSimpleFilter("name", self.op.filter) - self.impl = qcls(names, self.op.fields, False) + self.impl = qcls(self.op.filter, self.op.fields, False) def ExpandNames(self): self.impl.ExpandNames(self) @@ -3887,7 +4015,7 @@ class LUQueryFields(NoHooksLU): return self.qcls.FieldsQuery(self.op.fields) -class LUModifyNodeStorage(NoHooksLU): +class LUNodeModifyStorage(NoHooksLU): """Logical unit for modifying a storage volume on a node. """ @@ -3929,7 +4057,7 @@ class LUModifyNodeStorage(NoHooksLU): (self.op.name, self.op.node_name)) -class LUAddNode(LogicalUnit): +class LUNodeAdd(LogicalUnit): """Logical unit for adding node to the cluster. """ @@ -4191,7 +4319,7 @@ class LUAddNode(LogicalUnit): self.context.AddNode(new_node, self.proc.GetECId()) -class LUSetNodeParams(LogicalUnit): +class LUNodeSetParams(LogicalUnit): """Modifies the parameters of a node. 
@cvar _F2R: a dictionary from tuples of flags (mc, drained, offline) @@ -4318,15 +4446,15 @@ class LUSetNodeParams(LogicalUnit): errors.ECODE_STATE) if node.master_candidate and self.might_demote and not self.lock_all: - assert not self.op.auto_promote, "auto-promote set but lock_all not" + assert not self.op.auto_promote, "auto_promote set but lock_all not" # check if after removing the current node, we're missing master # candidates (mc_remaining, mc_should, _) = \ self.cfg.GetMasterCandidateStats(exceptions=[node.name]) if mc_remaining < mc_should: raise errors.OpPrereqError("Not enough master candidates, please" - " pass auto_promote to allow promotion", - errors.ECODE_STATE) + " pass auto promote option to allow" + " promotion", errors.ECODE_STATE) self.old_flags = old_flags = (node.master_candidate, node.drained, node.offline) @@ -4484,7 +4612,7 @@ class LUSetNodeParams(LogicalUnit): return result -class LUPowercycleNode(NoHooksLU): +class LUNodePowercycle(NoHooksLU): """Powercycles a node. """ @@ -4577,6 +4705,8 @@ class LUClusterQuery(NoHooksLU): "reserved_lvs": cluster.reserved_lvs, "primary_ip_version": primary_ip_version, "prealloc_wipe_disks": cluster.prealloc_wipe_disks, + "hidden_os": cluster.hidden_os, + "blacklisted_os": cluster.blacklisted_os, } return result @@ -4621,7 +4751,7 @@ class LUClusterConfigQuery(NoHooksLU): return values -class LUActivateInstanceDisks(NoHooksLU): +class LUInstanceActivateDisks(NoHooksLU): """Bring up an instance's disks. """ @@ -4699,13 +4829,13 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False, # SyncSource, etc.) # 1st pass, assemble on all nodes in secondary mode - for inst_disk in disks: + for idx, inst_disk in enumerate(disks): for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): if ignore_size: node_disk = node_disk.Copy() node_disk.UnsetSize() lu.cfg.SetDiskID(node_disk, node) - result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False) + result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx) msg = result.fail_msg if msg: lu.proc.LogWarning("Could not prepare block device %s on node %s" @@ -4717,7 +4847,7 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False, # FIXME: race condition on drbd migration to primary # 2nd pass, do only the primary node - for inst_disk in disks: + for idx, inst_disk in enumerate(disks): dev_path = None for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): @@ -4727,7 +4857,7 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False, node_disk = node_disk.Copy() node_disk.UnsetSize() lu.cfg.SetDiskID(node_disk, node) - result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True) + result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx) msg = result.fail_msg if msg: lu.proc.LogWarning("Could not prepare block device %s on node %s" @@ -4763,7 +4893,7 @@ def _StartInstanceDisks(lu, instance, force): raise errors.OpExecError("Disk consistency error") -class LUDeactivateInstanceDisks(NoHooksLU): +class LUInstanceDeactivateDisks(NoHooksLU): """Shutdown an instance's disks. 
""" @@ -4793,7 +4923,10 @@ class LUDeactivateInstanceDisks(NoHooksLU): """ instance = self.instance - _SafeShutdownInstanceDisks(self, instance) + if self.op.force: + _ShutdownInstanceDisks(self, instance) + else: + _SafeShutdownInstanceDisks(self, instance) def _SafeShutdownInstanceDisks(lu, instance, disks=None): @@ -4948,7 +5081,7 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested): errors.ECODE_NORES) -class LUStartupInstance(LogicalUnit): +class LUInstanceStartup(LogicalUnit): """Starts an instance. """ @@ -5049,7 +5182,7 @@ class LUStartupInstance(LogicalUnit): raise errors.OpExecError("Could not start instance: %s" % msg) -class LURebootInstance(LogicalUnit): +class LUInstanceReboot(LogicalUnit): """Reboot an instance. """ @@ -5124,7 +5257,7 @@ class LURebootInstance(LogicalUnit): self.cfg.MarkInstanceUp(instance.name) -class LUShutdownInstance(LogicalUnit): +class LUInstanceShutdown(LogicalUnit): """Shutdown an instance. """ @@ -5186,7 +5319,7 @@ class LUShutdownInstance(LogicalUnit): _ShutdownInstanceDisks(self, instance) -class LUReinstallInstance(LogicalUnit): +class LUInstanceReinstall(LogicalUnit): """Reinstall an instance. """ @@ -5272,7 +5405,7 @@ class LUReinstallInstance(LogicalUnit): _ShutdownInstanceDisks(self, inst) -class LURecreateInstanceDisks(LogicalUnit): +class LUInstanceRecreateDisks(LogicalUnit): """Recreate an instance's missing disks. """ @@ -5332,7 +5465,7 @@ class LURecreateInstanceDisks(LogicalUnit): _CreateDisks(self, self.instance, to_skip=to_skip) -class LURenameInstance(LogicalUnit): +class LUInstanceRename(LogicalUnit): """Rename an instance. """ @@ -5437,7 +5570,7 @@ class LURenameInstance(LogicalUnit): return inst.name -class LURemoveInstance(LogicalUnit): +class LUInstanceRemove(LogicalUnit): """Remove an instance. """ @@ -5520,7 +5653,7 @@ def _RemoveInstance(lu, feedback_fn, instance, ignore_failures): lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name -class LUQueryInstances(NoHooksLU): +class LUInstanceQuery(NoHooksLU): """Logical unit for querying instances. """ @@ -5528,8 +5661,8 @@ class LUQueryInstances(NoHooksLU): REQ_BGL = False def CheckArguments(self): - self.iq = _InstanceQuery(self.op.names, self.op.output_fields, - self.op.use_locking) + self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names), + self.op.output_fields, self.op.use_locking) def ExpandNames(self): self.iq.ExpandNames(self) @@ -5541,7 +5674,7 @@ class LUQueryInstances(NoHooksLU): return self.iq.OldStyleQuery(self) -class LUFailoverInstance(LogicalUnit): +class LUInstanceFailover(LogicalUnit): """Failover an instance. """ @@ -5688,7 +5821,7 @@ class LUFailoverInstance(LogicalUnit): (instance.name, target_node, msg)) -class LUMigrateInstance(LogicalUnit): +class LUInstanceMigrate(LogicalUnit): """Migrate an instance. This is migration without shutting down, compared to the failover, @@ -5737,7 +5870,7 @@ class LUMigrateInstance(LogicalUnit): return env, nl, nl_post -class LUMoveInstance(LogicalUnit): +class LUInstanceMove(LogicalUnit): """Move an instance by data-copying. 
""" @@ -5862,7 +5995,7 @@ class LUMoveInstance(LogicalUnit): for idx, disk in enumerate(instance.disks): self.LogInfo("Copying data for disk %d", idx) result = self.rpc.call_blockdev_assemble(target_node, disk, - instance.name, True) + instance.name, True, idx) if result.fail_msg: self.LogWarning("Can't assemble newly created disk %d: %s", idx, result.fail_msg) @@ -5912,7 +6045,7 @@ class LUMoveInstance(LogicalUnit): (instance.name, target_node, msg)) -class LUMigrateNode(LogicalUnit): +class LUNodeMigrate(LogicalUnit): """Migrate all instances from a node. """ @@ -6608,7 +6741,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None): " node %s" % (file_storage_dir, pnode)) # Note: this needs to be kept in sync with adding of disks in - # LUSetInstanceParams + # LUInstanceSetParams for idx, device in enumerate(instance.disks): if to_skip and idx in to_skip: continue @@ -6719,6 +6852,21 @@ def _ComputeDiskSize(disk_template, disks): return req_size_dict[disk_template] +def _FilterVmNodes(lu, nodenames): + """Filters out non-vm_capable nodes from a list. + + @type lu: L{LogicalUnit} + @param lu: the logical unit for which we check + @type nodenames: list + @param nodenames: the list of nodes on which we should check + @rtype: list + @return: the list of vm-capable nodes + + """ + vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList()) + return [name for name in nodenames if name not in vm_nodes] + + def _CheckHVParams(lu, nodenames, hvname, hvparams): """Hypervisor parameter validation. @@ -6736,6 +6884,7 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams): @raise errors.OpPrereqError: if the parameters are not valid """ + nodenames = _FilterVmNodes(lu, nodenames) hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvparams) @@ -6763,6 +6912,7 @@ def _CheckOSParams(lu, required, nodenames, osname, osparams): @raise errors.OpPrereqError: if the parameters are not valid """ + nodenames = _FilterVmNodes(lu, nodenames) result = lu.rpc.call_os_validate(required, nodenames, osname, [constants.OS_VALIDATE_PARAMETERS], osparams) @@ -6775,7 +6925,7 @@ def _CheckOSParams(lu, required, nodenames, osname, osparams): osname, node) -class LUCreateInstance(LogicalUnit): +class LUInstanceCreate(LogicalUnit): """Create an instance. """ @@ -7444,12 +7594,11 @@ class LUCreateInstance(LogicalUnit): raise errors.OpPrereqError("LV named %s used by another instance" % lv_name, errors.ECODE_NOTUNIQUE) - vg_names = self.rpc.call_vg_list([pnode.name]) + vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name] vg_names.Raise("Cannot get VG information from node %s" % pnode.name) node_lvs = self.rpc.call_lv_list([pnode.name], - vg_names[pnode.name].payload.keys() - )[pnode.name] + vg_names.payload.keys())[pnode.name] node_lvs.Raise("Cannot get LV information from node %s" % pnode.name) node_lvs = node_lvs.payload @@ -7686,7 +7835,7 @@ class LUCreateInstance(LogicalUnit): return list(iobj.all_nodes) -class LUConnectConsole(NoHooksLU): +class LUInstanceConsole(NoHooksLU): """Connect to an instance's console. This is somewhat special in that it returns the command line that @@ -7731,21 +7880,31 @@ class LUConnectConsole(NoHooksLU): logging.debug("Connecting to console of %s on %s", instance.name, node) - hyper = hypervisor.GetHypervisor(instance.hypervisor) - cluster = self.cfg.GetClusterInfo() - # beparams and hvparams are passed separately, to avoid editing the - # instance and then saving the defaults in the instance itself. 
- hvparams = cluster.FillHV(instance) - beparams = cluster.FillBE(instance) - console = hyper.GetInstanceConsole(instance, hvparams, beparams) + return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance) + + +def _GetInstanceConsole(cluster, instance): + """Returns console information for an instance. + + @type cluster: L{objects.Cluster} + @type instance: L{objects.Instance} + @rtype: dict + + """ + hyper = hypervisor.GetHypervisor(instance.hypervisor) + # beparams and hvparams are passed separately, to avoid editing the + # instance and then saving the defaults in the instance itself. + hvparams = cluster.FillHV(instance) + beparams = cluster.FillBE(instance) + console = hyper.GetInstanceConsole(instance, hvparams, beparams) - assert console.instance == instance.name - assert console.Validate() + assert console.instance == instance.name + assert console.Validate() - return console.ToDict() + return console.ToDict() -class LUReplaceDisks(LogicalUnit): +class LUInstanceReplaceDisks(LogicalUnit): """Replace the disks of an instance. """ @@ -8545,7 +8704,7 @@ class LURepairNodeStorage(NoHooksLU): (self.op.name, self.op.node_name)) -class LUNodeEvacuationStrategy(NoHooksLU): +class LUNodeEvacStrategy(NoHooksLU): """Computes the node evacuation strategy. """ @@ -8589,7 +8748,7 @@ class LUNodeEvacuationStrategy(NoHooksLU): return result -class LUGrowDisk(LogicalUnit): +class LUInstanceGrowDisk(LogicalUnit): """Grow a disk of an instance. """ @@ -8685,7 +8844,7 @@ class LUGrowDisk(LogicalUnit): " sync mode was requested.") -class LUQueryInstanceData(NoHooksLU): +class LUInstanceQueryData(NoHooksLU): """Query runtime instance data. """ @@ -8839,7 +8998,7 @@ class LUQueryInstanceData(NoHooksLU): return result -class LUSetInstanceParams(LogicalUnit): +class LUInstanceSetParams(LogicalUnit): """Modifies an instances's parameters. """ @@ -9253,7 +9412,7 @@ class LUSetInstanceParams(LogicalUnit): _CheckInstanceDown(self, instance, "cannot remove disks") if (disk_op == constants.DDM_ADD and - len(instance.nics) >= constants.MAX_DISKS): + len(instance.disks) >= constants.MAX_DISKS): raise errors.OpPrereqError("Instance has too many disks (%d), cannot" " add more" % constants.MAX_DISKS, errors.ECODE_STATE) @@ -10090,7 +10249,6 @@ class LUGroupAssignNodes(NoHooksLU): class _GroupQuery(_QueryBase): - FIELDS = query.GROUP_FIELDS def ExpandNames(self, lu): @@ -10173,7 +10331,8 @@ class LUGroupQuery(NoHooksLU): REQ_BGL = False def CheckArguments(self): - self.gq = _GroupQuery(self.op.names, self.op.output_fields, False) + self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names), + self.op.output_fields, False) def ExpandNames(self): self.gq.ExpandNames(self) @@ -10182,7 +10341,7 @@ class LUGroupQuery(NoHooksLU): return self.gq.OldStyleQuery(self) -class LUSetGroupParams(LogicalUnit): +class LUGroupSetParams(LogicalUnit): """Modifies the parameters of a node group. """ @@ -10286,9 +10445,9 @@ class LUGroupRemove(LogicalUnit): # Verify the cluster would not be left group-less. 
if len(self.cfg.GetNodeGroupList()) == 1: - raise errors.OpPrereqError("Group '%s' is the last group in the cluster," - " which cannot be left without at least one" - " group" % self.op.group_name, + raise errors.OpPrereqError("Group '%s' is the only group," + " cannot be removed" % + self.op.group_name, errors.ECODE_STATE) def BuildHooksEnv(self): @@ -10321,7 +10480,7 @@ class LUGroupRename(LogicalUnit): def ExpandNames(self): # This raises errors.OpPrereqError on its own: - self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name) + self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name) self.needed_locks = { locking.LEVEL_NODEGROUP: [self.group_uuid], @@ -10330,8 +10489,7 @@ class LUGroupRename(LogicalUnit): def CheckPrereq(self): """Check prerequisites. - This checks that the given old_name exists as a node group, and that - new_name doesn't. + Ensures requested new name is not yet used. """ try: @@ -10349,7 +10507,7 @@ class LUGroupRename(LogicalUnit): """ env = { - "OLD_NAME": self.op.old_name, + "OLD_NAME": self.op.group_name, "NEW_NAME": self.op.new_name, } @@ -10372,7 +10530,7 @@ class LUGroupRename(LogicalUnit): if group is None: raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" % - (self.op.old_name, self.group_uuid)) + (self.op.group_name, self.group_uuid)) group.name = self.op.new_name self.cfg.Update(group, feedback_fn) @@ -10414,7 +10572,7 @@ class TagsLU(NoHooksLU): # pylint: disable-msg=W0223 str(self.op.kind), errors.ECODE_INVAL) -class LUGetTags(TagsLU): +class LUTagsGet(TagsLU): """Returns the tags of a given object. """ @@ -10433,7 +10591,7 @@ class LUGetTags(TagsLU): return list(self.target.GetTags()) -class LUSearchTags(NoHooksLU): +class LUTagsSearch(NoHooksLU): """Searches the tags for a given pattern. """ @@ -10472,7 +10630,7 @@ class LUSearchTags(NoHooksLU): return results -class LUAddTags(TagsLU): +class LUTagsSet(TagsLU): """Sets a tag on a given object. """ @@ -10500,7 +10658,7 @@ class LUAddTags(TagsLU): self.cfg.Update(self.target, feedback_fn) -class LUDelTags(TagsLU): +class LUTagsDel(TagsLU): """Delete a list of tags from a given object. """ @@ -10582,7 +10740,7 @@ class LUTestDelay(NoHooksLU): self._TestDelay() -class LUTestJobqueue(NoHooksLU): +class LUTestJqueue(NoHooksLU): """Utility LU to test some aspects of the job queue. """ @@ -10929,8 +11087,7 @@ class IAllocator(object): "i_pri_up_memory": i_p_up_mem, } pnr_dyn.update(node_results[nname]) - - node_results[nname] = pnr_dyn + node_results[nname] = pnr_dyn return node_results
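
Note on the query refactoring above: _QueryBase now takes a qlang filter
instead of a plain name list, and the old-style query LUs (LUNodeQuery,
LUInstanceQuery, LUGroupQuery) convert their name arguments with
qlang.MakeSimpleFilter("name", ...), which _QueryBase.__init__ reads back via
ReadSimpleFilter("name", filter_). Below is a minimal sketch of that
round-trip, assuming the OR-of-equalities filter shape used by qlang ("|" and
"=" operators); the two helpers are illustrative stand-ins, not the Ganeti
implementations.

def make_simple_filter(field, names):
  # Build an OR-of-equality filter over one field, e.g. "name";
  # an empty name list means no filter (match all objects).
  if not names:
    return None
  return ["|"] + [["=", field, name] for name in names]

def read_simple_filter(field, filter_):
  # Recover the plain name list from a filter built as above.
  if filter_ is None:
    return None
  assert filter_[0] == "|"
  return [clause[2] for clause in filter_[1:]
          if clause[0] == "=" and clause[1] == field]

flt = make_simple_filter("name", ["node1.example.com", "node2.example.com"])
print(flt)
# ['|', ['=', 'name', 'node1.example.com'], ['=', 'name', 'node2.example.com']]
print(read_simple_filter("name", flt))
# ['node1.example.com', 'node2.example.com']

This keeps the names-based RAPI/CLI entry points working unchanged, while
letting LUQuery pass an arbitrary filter straight through to the query layer.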
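
Note on the reworked LUOobCommand.Exec() above: instead of a single payload
for one node, it now returns one entry per node, each a list of
(status, data) tuples — the node name first, then the command result
(RS_UNAVAIL when the node has no OOB helper, RS_NODATA when the RPC or the
payload check failed). A hedged sketch of the shape a caller might see for a
power-status command; the RS_* values, node names, and payload key below are
placeholders for illustration, not Ganeti's actual constants.

RS_NORMAL, RS_NODATA, RS_UNAVAIL = "normal", "nodata", "unavail"  # placeholders

ret = [
  [(RS_NORMAL, "node1.example.com"), (RS_NORMAL, {"powered": True})],
  [(RS_NORMAL, "node2.example.com"), (RS_UNAVAIL, None)],  # no OOB support
]

for (_, name), (status, payload) in ret:
  print("%s: %s %s" % (name, status, payload))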