X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/d599d686dbde880026f4cf2fe934281e78c94f0f..20e01edd2811c9914724936624a5ff0818ce9024:/lib/cmdlib.py diff --git a/lib/cmdlib.py b/lib/cmdlib.py index acde43d..ce5edc6 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -32,6 +32,7 @@ import re import platform import logging import copy +import random from ganeti import ssh from ganeti import utils @@ -42,6 +43,7 @@ from ganeti import constants from ganeti import objects from ganeti import opcodes from ganeti import serializer +from ganeti import ssconf class LogicalUnit(object): @@ -413,6 +415,32 @@ def _CheckOutputFields(static, dynamic, selected): % ",".join(delta)) +def _CheckBooleanOpField(op, name): + """Validates boolean opcode parameters. + + This will ensure that an opcode parameter is either a boolean value, + or None (but that it always exists). + + """ + val = getattr(op, name, None) + if not (val is None or isinstance(val, bool)): + raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" % + (name, str(val))) + setattr(op, name, val) + + +def _CheckNodeOnline(lu, node): + """Ensure that a given node is online. + + @param lu: the LU on behalf of which we make the check + @param node: the node to check + @raise errors.OpPrereqError: if the nodes is offline + + """ + if lu.cfg.GetNodeInfo(node).offline: + raise errors.OpPrereqError("Can't use offline node %s" % node) + + def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, memory, vcpus, nics): """Builds instance related env variables for hooks @@ -498,14 +526,32 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None): return _BuildInstanceHookEnv(**args) +def _AdjustCandidatePool(lu): + """Adjust the candidate pool after node operations. + + """ + mod_list = lu.cfg.MaintainCandidatePool() + if mod_list: + lu.LogInfo("Promoted nodes to master candidate role: %s", + ", ".join(node.name for node in mod_list)) + for name in mod_list: + lu.context.ReaddNode(name) + mc_now, mc_max = lu.cfg.GetMasterCandidateStats() + if mc_now > mc_max: + lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" % + (mc_now, mc_max)) + + def _CheckInstanceBridgesExist(lu, instance): """Check that the brigdes needed by an instance exist. """ # check bridges existance brlist = [nic.bridge for nic in instance.nics] - if not lu.rpc.call_bridges_exist(instance.primary_node, brlist): - raise errors.OpPrereqError("one or more target bridges %s does not" + result = lu.rpc.call_bridges_exist(instance.primary_node, brlist) + result.Raise() + if not result.data: + raise errors.OpPrereqError("One or more target bridges %s does not" " exist on destination node '%s'" % (brlist, instance.primary_node)) @@ -540,7 +586,9 @@ class LUDestroyCluster(NoHooksLU): """ master = self.cfg.GetMasterNode() - if not self.rpc.call_node_stop_master(master, False): + result = self.rpc.call_node_stop_master(master, False) + result.Raise() + if not result.data: raise errors.OpExecError("Could not disable the master role") priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) utils.CreateBackup(priv_key) @@ -564,30 +612,36 @@ class LUVerifyCluster(LogicalUnit): } self.share_locks = dict(((i, 1) for i in locking.LEVELS)) - def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result, - remote_version, feedback_fn): + def _VerifyNode(self, nodeinfo, file_list, local_cksum, + node_result, feedback_fn, master_files): """Run multiple tests against a node. 
- Test list:: + Test list: - compares ganeti version - checks vg existance and size > 20G - checks config file checksum - checks ssh to other nodes - @type node: string - @param node: the name of the node to check + @type nodeinfo: L{objects.Node} + @param nodeinfo: the node to check @param file_list: required list of files @param local_cksum: dictionary of local files and their checksums - @type vglist: dict - @param vglist: dictionary of volume group names and their size @param node_result: the results from the node - @param remote_version: the RPC version from the remote node @param feedback_fn: function used to accumulate results + @param master_files: list of files that only masters should have """ + node = nodeinfo.name + + # main result, node_result should be a non-empty dict + if not node_result or not isinstance(node_result, dict): + feedback_fn(" - ERROR: unable to verify node %s." % (node,)) + return True + # compares ganeti version local_version = constants.PROTOCOL_VERSION + remote_version = node_result.get('version', None) if not remote_version: feedback_fn(" - ERROR: connection to %s failed" % (node)) return True @@ -600,6 +654,7 @@ class LUVerifyCluster(LogicalUnit): # checks vg existance and size > 20G bad = False + vglist = node_result.get(constants.NV_VGLIST, None) if not vglist: feedback_fn(" - ERROR: unable to check volume groups on node %s." % (node,)) @@ -611,47 +666,59 @@ class LUVerifyCluster(LogicalUnit): feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node)) bad = True - if not node_result: - feedback_fn(" - ERROR: unable to verify node %s." % (node,)) - return True - # checks config file checksum - # checks ssh to any - if 'filelist' not in node_result: + remote_cksum = node_result.get(constants.NV_FILELIST, None) + if not isinstance(remote_cksum, dict): bad = True feedback_fn(" - ERROR: node hasn't returned file checksum data") else: - remote_cksum = node_result['filelist'] for file_name in file_list: + node_is_mc = nodeinfo.master_candidate + must_have_file = file_name not in master_files if file_name not in remote_cksum: - bad = True - feedback_fn(" - ERROR: file '%s' missing" % file_name) + if node_is_mc or must_have_file: + bad = True + feedback_fn(" - ERROR: file '%s' missing" % file_name) elif remote_cksum[file_name] != local_cksum[file_name]: - bad = True - feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name) + if node_is_mc or must_have_file: + bad = True + feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name) + else: + # not candidate and this is not a must-have file + bad = True + feedback_fn(" - ERROR: non master-candidate has old/wrong file" + " '%s'" % file_name) + else: + # all good, except non-master/non-must have combination + if not node_is_mc and not must_have_file: + feedback_fn(" - ERROR: file '%s' should not exist on non master" + " candidates" % file_name) - if 'nodelist' not in node_result: + # checks ssh to any + + if constants.NV_NODELIST not in node_result: bad = True feedback_fn(" - ERROR: node hasn't returned node ssh connectivity data") else: - if node_result['nodelist']: + if node_result[constants.NV_NODELIST]: bad = True - for node in node_result['nodelist']: + for node in node_result[constants.NV_NODELIST]: feedback_fn(" - ERROR: ssh communication with node '%s': %s" % - (node, node_result['nodelist'][node])) - if 'node-net-test' not in node_result: + (node, node_result[constants.NV_NODELIST][node])) + + if constants.NV_NODENETTEST not in node_result: bad = True feedback_fn(" - ERROR: node hasn't 
returned node tcp connectivity data") else: - if node_result['node-net-test']: + if node_result[constants.NV_NODENETTEST]: bad = True - nlist = utils.NiceSort(node_result['node-net-test'].keys()) + nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys()) for node in nlist: feedback_fn(" - ERROR: tcp communication with node '%s': %s" % - (node, node_result['node-net-test'][node])) + (node, node_result[constants.NV_NODENETTEST][node])) - hyp_result = node_result.get('hypervisor', None) + hyp_result = node_result.get(constants.NV_HYPERVISOR, None) if isinstance(hyp_result, dict): for hv_name, hv_result in hyp_result.iteritems(): if hv_result is not None: @@ -660,7 +727,7 @@ class LUVerifyCluster(LogicalUnit): return bad def _VerifyInstance(self, instance, instanceconfig, node_vol_is, - node_instance, feedback_fn): + node_instance, feedback_fn, n_offline): """Verify an instance. This function checks to see if the required block devices are @@ -675,6 +742,9 @@ class LUVerifyCluster(LogicalUnit): instanceconfig.MapLVsByNode(node_vol_should) for node in node_vol_should: + if node in n_offline: + # ignore missing volumes on offline nodes + continue for volume in node_vol_should[node]: if node not in node_vol_is or volume not in node_vol_is[node]: feedback_fn(" - ERROR: volume %s missing on node %s" % @@ -682,8 +752,9 @@ class LUVerifyCluster(LogicalUnit): bad = True if not instanceconfig.status == 'down': - if (node_current not in node_instance or - not instance in node_instance[node_current]): + if ((node_current not in node_instance or + not instance in node_instance[node_current]) and + node_current not in n_offline): feedback_fn(" - ERROR: instance %s not running on node %s" % (instance, node_current)) bad = True @@ -798,6 +869,7 @@ class LUVerifyCluster(LogicalUnit): instancelist = utils.NiceSort(self.cfg.GetInstanceList()) i_non_redundant = [] # Non redundant instances i_non_a_balanced = [] # Non auto-balanced instances + n_offline = [] # List of offline nodes node_volume = {} node_instance = {} node_info = {} @@ -805,71 +877,95 @@ class LUVerifyCluster(LogicalUnit): # FIXME: verify OS list # do local checksums - file_names = [] + master_files = [constants.CLUSTER_CONF_FILE] + + file_names = ssconf.SimpleStore().GetFileList() file_names.append(constants.SSL_CERT_FILE) - file_names.append(constants.CLUSTER_CONF_FILE) + file_names.append(constants.RAPI_CERT_FILE) + file_names.extend(master_files) + local_checksums = utils.FingerprintFiles(file_names) feedback_fn("* Gathering data (%d nodes)" % len(nodelist)) - all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name) - all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors) - all_vglist = self.rpc.call_vg_list(nodelist) node_verify_param = { - 'filelist': file_names, - 'nodelist': nodelist, - 'hypervisor': hypervisors, - 'node-net-test': [(node.name, node.primary_ip, node.secondary_ip) - for node in nodeinfo] + constants.NV_FILELIST: file_names, + constants.NV_NODELIST: [node.name for node in nodeinfo + if not node.offline], + constants.NV_HYPERVISOR: hypervisors, + constants.NV_NODENETTEST: [(node.name, node.primary_ip, + node.secondary_ip) for node in nodeinfo + if not node.offline], + constants.NV_LVLIST: vg_name, + constants.NV_INSTANCELIST: hypervisors, + constants.NV_VGLIST: None, + constants.NV_VERSION: None, + constants.NV_HVINFO: self.cfg.GetHypervisorType(), } all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param, self.cfg.GetClusterName()) - all_rversion = self.rpc.call_version(nodelist) - 
all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(), - self.cfg.GetHypervisorType()) cluster = self.cfg.GetClusterInfo() - for node in nodelist: - feedback_fn("* Verifying node %s" % node) - result = self._VerifyNode(node, file_names, local_checksums, - all_vglist[node], all_nvinfo[node], - all_rversion[node], feedback_fn) - bad = bad or result + master_node = self.cfg.GetMasterNode() + for node_i in nodeinfo: + node = node_i.name + nresult = all_nvinfo[node].data + + if node_i.offline: + feedback_fn("* Skipping offline node %s" % (node,)) + n_offline.append(node) + continue - # node_volume - volumeinfo = all_volumeinfo[node] + if node == master_node: + ntype = "master" + elif node_i.master_candidate: + ntype = "master candidate" + else: + ntype = "regular" + feedback_fn("* Verifying node %s (%s)" % (node, ntype)) + + if all_nvinfo[node].failed or not isinstance(nresult, dict): + feedback_fn(" - ERROR: connection to %s failed" % (node,)) + bad = True + continue - if isinstance(volumeinfo, basestring): + result = self._VerifyNode(node_i, file_names, local_checksums, + nresult, feedback_fn, master_files) + bad = bad or result + + lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data") + if isinstance(lvdata, basestring): feedback_fn(" - ERROR: LVM problem on node %s: %s" % - (node, volumeinfo[-400:].encode('string_escape'))) + (node, lvdata.encode('string_escape'))) bad = True node_volume[node] = {} - elif not isinstance(volumeinfo, dict): - feedback_fn(" - ERROR: connection to %s failed" % (node,)) + elif not isinstance(lvdata, dict): + feedback_fn(" - ERROR: connection to %s failed (lvlist)" % (node,)) bad = True continue else: - node_volume[node] = volumeinfo + node_volume[node] = lvdata # node_instance - nodeinstance = all_instanceinfo[node] - if type(nodeinstance) != list: - feedback_fn(" - ERROR: connection to %s failed" % (node,)) + idata = nresult.get(constants.NV_INSTANCELIST, None) + if not isinstance(idata, list): + feedback_fn(" - ERROR: connection to %s failed (instancelist)" % + (node,)) bad = True continue - node_instance[node] = nodeinstance + node_instance[node] = idata # node_info - nodeinfo = all_ninfo[node] + nodeinfo = nresult.get(constants.NV_HVINFO, None) if not isinstance(nodeinfo, dict): - feedback_fn(" - ERROR: connection to %s failed" % (node,)) + feedback_fn(" - ERROR: connection to %s failed (hvinfo)" % (node,)) bad = True continue try: node_info[node] = { "mfree": int(nodeinfo['memory_free']), - "dfree": int(nodeinfo['vg_free']), + "dfree": int(nresult[constants.NV_VGLIST][vg_name]), "pinst": [], "sinst": [], # dictionary holding all instances this node is secondary for, @@ -891,8 +987,9 @@ class LUVerifyCluster(LogicalUnit): feedback_fn("* Verifying instance %s" % instance) inst_config = self.cfg.GetInstanceInfo(instance) result = self._VerifyInstance(instance, inst_config, node_volume, - node_instance, feedback_fn) + node_instance, feedback_fn, n_offline) bad = bad or result + inst_nodes_offline = [] inst_config.MapLVsByNode(node_vol_should) @@ -901,11 +998,14 @@ class LUVerifyCluster(LogicalUnit): pnode = inst_config.primary_node if pnode in node_info: node_info[pnode]['pinst'].append(instance) - else: + elif pnode not in n_offline: feedback_fn(" - ERROR: instance %s, connection to primary node" " %s failed" % (instance, pnode)) bad = True + if pnode in n_offline: + inst_nodes_offline.append(pnode) + # If the instance is non-redundant we cannot survive losing its primary # node, so we are not N+1 compliant. 
On the other hand we have no disk # templates with more than one secondary so that situation is not well @@ -926,9 +1026,18 @@ class LUVerifyCluster(LogicalUnit): if pnode not in node_info[snode]['sinst-by-pnode']: node_info[snode]['sinst-by-pnode'][pnode] = [] node_info[snode]['sinst-by-pnode'][pnode].append(instance) - else: + elif snode not in n_offline: feedback_fn(" - ERROR: instance %s, connection to secondary node" " %s failed" % (instance, snode)) + bad = True + if snode in n_offline: + inst_nodes_offline.append(snode) + + if inst_nodes_offline: + # warn that the instance lives on offline nodes, and set bad=True + feedback_fn(" - ERROR: instance lives on offline node(s) %s" % + ", ".join(inst_nodes_offline)) + bad = True feedback_fn("* Verifying orphan volumes") result = self._VerifyOrphanVolumes(node_vol_should, node_volume, @@ -954,6 +1063,9 @@ class LUVerifyCluster(LogicalUnit): feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found." % len(i_non_a_balanced)) + if n_offline: + feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline)) + return not bad def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result): @@ -984,11 +1096,14 @@ class LUVerifyCluster(LogicalUnit): for node_name in hooks_results: show_node_header = True res = hooks_results[node_name] - if res is False or not isinstance(res, list): - feedback_fn(" Communication failure") + if res.failed or res.data is False or not isinstance(res.data, list): + if res.offline: + # no need to warn or set fail return value + continue + feedback_fn(" Communication failure in hooks execution") lu_result = 1 continue - for script, hkr, output in res: + for script, hkr, output in res.data: if hkr == constants.HKR_FAIL: # The node header is only shown once, if there are # failing hooks on that node @@ -1057,7 +1172,12 @@ class LUVerifyDisks(NoHooksLU): for node in nodes: # node_volume lvs = node_lvs[node] - + if lvs.failed: + if not lvs.offline: + self.LogWarning("Connection to node %s failed: %s" % + (node, lvs.data)) + continue + lvs = lvs.data if isinstance(lvs, basestring): logging.warning("Error enumerating LVs on node %s: %s", node, lvs) res_nlvm[node] = lvs @@ -1132,31 +1252,33 @@ class LURenameCluster(LogicalUnit): # shutdown the master IP master = self.cfg.GetMasterNode() - if not self.rpc.call_node_stop_master(master, False): + result = self.rpc.call_node_stop_master(master, False) + if result.failed or not result.data: raise errors.OpExecError("Could not disable the master role") try: - # modify the sstore - # TODO: sstore - ss.SetKey(ss.SS_MASTER_IP, ip) - ss.SetKey(ss.SS_CLUSTER_NAME, clustername) - - # Distribute updated ss config to all nodes - myself = self.cfg.GetNodeInfo(master) - dist_nodes = self.cfg.GetNodeList() - if myself.name in dist_nodes: - dist_nodes.remove(myself.name) - - logging.debug("Copying updated ssconf data to all nodes") - for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]: - fname = ss.KeyToFilename(keyname) - result = self.rpc.call_upload_file(dist_nodes, fname) - for to_node in dist_nodes: - if not result[to_node]: - self.LogWarning("Copy of file %s to node %s failed", - fname, to_node) + cluster = self.cfg.GetClusterInfo() + cluster.cluster_name = clustername + cluster.master_ip = ip + self.cfg.Update(cluster) + + # update the known hosts file + ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE) + node_list = self.cfg.GetNodeList() + try: + node_list.remove(master) + except ValueError: + pass + result = self.rpc.call_upload_file(node_list, + 
constants.SSH_KNOWN_HOSTS_FILE) + for to_node, to_result in result.iteritems(): + if to_result.failed or not to_result.data: + logging.error("Copy of file %s to node %s failed", + constants.SSH_KNOWN_HOSTS_FILE, to_node) + finally: - if not self.rpc.call_node_start_master(master, False): + result = self.rpc.call_node_start_master(master, False) + if result.failed or not result.data: self.LogWarning("Could not re-enable the master role on" " the master, please restart manually.") @@ -1186,6 +1308,21 @@ class LUSetClusterParams(LogicalUnit): _OP_REQP = [] REQ_BGL = False + def CheckParameters(self): + """Check parameters + + """ + if not hasattr(self.op, "candidate_pool_size"): + self.op.candidate_pool_size = None + if self.op.candidate_pool_size is not None: + try: + self.op.candidate_pool_size = int(self.op.candidate_pool_size) + except ValueError, err: + raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" % + str(err)) + if self.op.candidate_pool_size < 1: + raise errors.OpPrereqError("At least one master candidate needed") + def ExpandNames(self): # FIXME: in the future maybe other cluster params won't require checking on # all nodes to be modified. @@ -1228,16 +1365,21 @@ class LUSetClusterParams(LogicalUnit): if self.op.vg_name: vglist = self.rpc.call_vg_list(node_list) for node in node_list: - vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name, + if vglist[node].failed: + # ignoring down node + self.LogWarning("Node %s unreachable/error, ignoring" % node) + continue + vgstatus = utils.CheckVolumeGroupSize(vglist[node].data, + self.op.vg_name, constants.MIN_VG_SIZE) if vgstatus: raise errors.OpPrereqError("Error on node '%s': %s" % (node, vgstatus)) self.cluster = cluster = self.cfg.GetClusterInfo() - # beparams changes do not need validation (we can't validate?), - # but we still process here + # validate beparams changes if self.op.beparams: + utils.CheckBEParams(self.op.beparams) self.new_beparams = cluster.FillDict( cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams) @@ -1284,8 +1426,43 @@ class LUSetClusterParams(LogicalUnit): self.cluster.enabled_hypervisors = self.op.enabled_hypervisors if self.op.beparams: self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams + if self.op.candidate_pool_size is not None: + self.cluster.candidate_pool_size = self.op.candidate_pool_size + self.cfg.Update(self.cluster) + # we want to update nodes after the cluster so that if any errors + # happen, we have recorded and saved the cluster info + if self.op.candidate_pool_size is not None: + _AdjustCandidatePool(self) + + +class LURedistributeConfig(NoHooksLU): + """Force the redistribution of cluster configuration. + + This is a very simple LU. + + """ + _OP_REQP = [] + REQ_BGL = False + + def ExpandNames(self): + self.needed_locks = { + locking.LEVEL_NODE: locking.ALL_SET, + } + self.share_locks[locking.LEVEL_NODE] = 1 + + def CheckPrereq(self): + """Check prerequisites. + + """ + + def Exec(self, feedback_fn): + """Redistribute the configuration. + + """ + self.cfg.Update(self.cfg.GetClusterInfo()) + def _WaitForSync(lu, instance, oneshot=False, unlock=False): """Sleep and poll for an instance's disk to sync. 
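The candidate_pool_size handling above ends by calling _AdjustCandidatePool, which delegates the actual promotion decision to the configuration layer (not shown in this patch). Below is a simplified, self-contained sketch of that bookkeeping; the Node class, the promotion order and the helper name are illustrative stand-ins, not ganeti's own implementation.

# Simplified stand-in for the candidate-pool bookkeeping that
# _AdjustCandidatePool relies on; Node and the helper below are
# illustrative, not the ganeti config-layer code.
class Node(object):
  def __init__(self, name, master_candidate=False, offline=False):
    self.name = name
    self.master_candidate = master_candidate
    self.offline = offline


def MaintainCandidatePoolSketch(nodes, pool_size):
  """Promote eligible nodes until the candidate pool reaches pool_size.

  Returns the list of newly promoted nodes, which is what
  _AdjustCandidatePool logs and re-adds to the cluster context.

  """
  mc_now = len([n for n in nodes if n.master_candidate and not n.offline])
  promoted = []
  for node in nodes:
    if mc_now >= pool_size:
      break
    if not node.master_candidate and not node.offline:
      node.master_candidate = True
      promoted.append(node)
      mc_now += 1
  return promoted


nodes = [Node("node1", master_candidate=True), Node("node2"), Node("node3")]
print([n.name for n in MaintainCandidatePoolSketch(nodes, 2)])  # ['node2']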
@@ -1308,7 +1485,7 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False): done = True cumul_degraded = False rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks) - if not rstats: + if rstats.failed or not rstats.data: lu.LogWarning("Can't get any data from node %s", node) retries += 1 if retries >= 10: @@ -1316,6 +1493,7 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False): " aborting." % node) time.sleep(6) continue + rstats = rstats.data retries = 0 for i in range(len(rstats)): mstat = rstats[i] @@ -1362,11 +1540,11 @@ def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False): result = True if on_primary or dev.AssembleOnSecondary(): rstats = lu.rpc.call_blockdev_find(node, dev) - if not rstats: + if rstats.failed or not rstats.data: logging.warning("Node %s: disk degraded, not found or node down", node) result = False else: - result = result and (not rstats[idx]) + result = result and (not rstats.data[idx]) if dev.children: for child in dev.children: result = result and _CheckDiskConsistency(lu, child, node, on_primary) @@ -1419,9 +1597,9 @@ class LUDiagnoseOS(NoHooksLU): """ all_os = {} for node_name, nr in rlist.iteritems(): - if not nr: + if nr.failed or not nr.data: continue - for os_obj in nr: + for os_obj in nr.data: if os_obj.name not in all_os: # build a list of nodes for this os containing empty lists # for each node in node_list @@ -1436,10 +1614,12 @@ class LUDiagnoseOS(NoHooksLU): """ node_list = self.acquired_locks[locking.LEVEL_NODE] - node_data = self.rpc.call_os_diagnose(node_list) + valid_nodes = [node for node in self.cfg.GetOnlineNodeList() + if node in node_list] + node_data = self.rpc.call_os_diagnose(valid_nodes) if node_data == False: raise errors.OpExecError("Can't gather the list of OSes") - pol = self._DiagnoseByOS(node_list, node_data) + pol = self._DiagnoseByOS(valid_nodes, node_data) output = [] for os_name, os_data in pol.iteritems(): row = [] @@ -1507,11 +1687,8 @@ class LURemoveNode(LogicalUnit): for instance_name in instance_list: instance = self.cfg.GetInstanceInfo(instance_name) - if node.name == instance.primary_node: - raise errors.OpPrereqError("Instance %s still running on the node," - " please remove first." % instance_name) - if node.name in instance.secondary_nodes: - raise errors.OpPrereqError("Instance %s has node as a secondary," + if node.name in instance.all_nodes: + raise errors.OpPrereqError("Instance %s is still running on the node," " please remove first." % instance_name) self.op.node_name = node.name self.node = node @@ -1528,6 +1705,9 @@ class LURemoveNode(LogicalUnit): self.rpc.call_node_leave_cluster(node.name) + # Promote nodes to master candidate as needed + _AdjustCandidatePool(self) + class LUQueryNodes(NoHooksLU): """Logical unit for querying nodes. 
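A recurring pattern in this patch is the switch from bare RPC return values to result objects checked through .failed, .offline, .data and .Raise(). The sketch below is a minimal stand-in that only mirrors the fields and method exercised in these hunks; it is not the real ganeti RPC result class.

# Minimal stand-in for the result objects that rpc.call_* returns after
# this patch; only .data, .failed, .offline and .Raise() are mirrored.
class OpExecError(Exception):
  pass


class FakeRpcResult(object):
  def __init__(self, data=None, failed=False, offline=False, error=None):
    self.data = data          # remote payload, meaningful only on success
    self.failed = failed      # transport or daemon level failure
    self.offline = offline    # node is marked offline, call was skipped
    self.error = error

  def Raise(self):
    """Turn a transport failure into an exception at the call site."""
    if self.failed:
      raise OpExecError("RPC failed: %s" % self.error)


result = FakeRpcResult(data=["xen-br0"])
result.Raise()                         # no-op on success
if result.failed or not result.data:   # payload check stays separate
  raise OpExecError("one or more target bridges missing")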
@@ -1547,6 +1727,9 @@ class LUQueryNodes(NoHooksLU): "pinst_list", "sinst_list", "pip", "sip", "tags", "serial_no", + "master_candidate", + "master", + "offline", ) def ExpandNames(self): @@ -1602,8 +1785,9 @@ class LUQueryNodes(NoHooksLU): node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(), self.cfg.GetHypervisorType()) for name in nodenames: - nodeinfo = node_data.get(name, None) - if nodeinfo: + nodeinfo = node_data[name] + if not nodeinfo.failed and nodeinfo.data: + nodeinfo = nodeinfo.data fn = utils.TryConvert live_data[name] = { "mtotal": fn(int, nodeinfo.get('memory_total', None)), @@ -1635,6 +1819,8 @@ class LUQueryNodes(NoHooksLU): if secnode in node_to_secondary: node_to_secondary[secnode].add(inst.name) + master_node = self.cfg.GetMasterNode() + # end data gathering output = [] @@ -1659,6 +1845,12 @@ class LUQueryNodes(NoHooksLU): val = list(node.GetTags()) elif field == "serial_no": val = node.serial_no + elif field == "master_candidate": + val = node.master_candidate + elif field == "master": + val = node.name == master_node + elif field == "offline": + val = node.offline elif self._FIELDS_DYNAMIC.Matches(field): val = live_data[node.name].get(field, None) else: @@ -1713,10 +1905,10 @@ class LUQueryNodeVolumes(NoHooksLU): output = [] for node in nodenames: - if node not in volumes or not volumes[node]: + if node not in volumes or volumes[node].failed or not volumes[node].data: continue - node_vols = volumes[node][:] + node_vols = volumes[node].data[:] node_vols.sort(key=lambda vol: vol['dev']) for vol in node_vols: @@ -1847,9 +2039,15 @@ class LUAddNode(LogicalUnit): raise errors.OpPrereqError("Node secondary ip not reachable by TCP" " based ping to noded port") + cp_size = self.cfg.GetClusterInfo().candidate_pool_size + mc_now, _ = self.cfg.GetMasterCandidateStats() + master_candidate = mc_now < cp_size + self.new_node = objects.Node(name=node, primary_ip=primary_ip, - secondary_ip=secondary_ip) + secondary_ip=secondary_ip, + master_candidate=master_candidate, + offline=False) def Exec(self, feedback_fn): """Adds the new node to the cluster. @@ -1860,14 +2058,15 @@ class LUAddNode(LogicalUnit): # check connectivity result = self.rpc.call_version([node])[node] - if result: - if constants.PROTOCOL_VERSION == result: + result.Raise() + if result.data: + if constants.PROTOCOL_VERSION == result.data: logging.info("Communication to node %s fine, sw version %s match", - node, result) + node, result.data) else: raise errors.OpExecError("Version mismatch master version %s," " node version %s" % - (constants.PROTOCOL_VERSION, result)) + (constants.PROTOCOL_VERSION, result.data)) else: raise errors.OpExecError("Cannot get version from the new node") @@ -1890,15 +2089,16 @@ class LUAddNode(LogicalUnit): keyarray[2], keyarray[3], keyarray[4], keyarray[5]) - if not result: + if result.failed or not result.data: raise errors.OpExecError("Cannot transfer ssh keys to the new node") # Add node to our /etc/hosts, and add key to known_hosts utils.AddHostToEtcHosts(new_node.name) if new_node.secondary_ip != new_node.primary_ip: - if not self.rpc.call_node_has_ip_address(new_node.name, - new_node.secondary_ip): + result = self.rpc.call_node_has_ip_address(new_node.name, + new_node.secondary_ip) + if result.failed or not result.data: raise errors.OpExecError("Node claims it doesn't have the secondary ip" " you gave (%s). Please fix and re-run this" " command." 
% new_node.secondary_ip) @@ -1912,11 +2112,11 @@ class LUAddNode(LogicalUnit): result = self.rpc.call_node_verify(node_verify_list, node_verify_param, self.cfg.GetClusterName()) for verifier in node_verify_list: - if not result[verifier]: + if result[verifier].failed or not result[verifier].data: raise errors.OpExecError("Cannot communicate with %s's node daemon" " for remote verification" % verifier) - if result[verifier]['nodelist']: - for failed in result[verifier]['nodelist']: + if result[verifier].data['nodelist']: + for failed in result[verifier].data['nodelist']: feedback_fn("ssh/hostname verification failed %s -> %s" % (verifier, result[verifier]['nodelist'][failed])) raise errors.OpExecError("ssh/hostname verification failed.") @@ -1933,8 +2133,8 @@ class LUAddNode(LogicalUnit): logging.debug("Copying hosts and known_hosts to all nodes") for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE): result = self.rpc.call_upload_file(dist_nodes, fname) - for to_node in dist_nodes: - if not result[to_node]: + for to_node, to_result in result.iteritems(): + if to_result.failed or not to_result.data: logging.error("Copy of file %s to node %s failed", fname, to_node) to_copy = [] @@ -1942,7 +2142,7 @@ class LUAddNode(LogicalUnit): to_copy.append(constants.VNC_PASSWORD_FILE) for fname in to_copy: result = self.rpc.call_upload_file([node], fname) - if not result[node]: + if result[node].failed or not result[node]: logging.error("Could not copy file %s to node %s", fname, node) if self.op.readd: @@ -1951,6 +2151,112 @@ class LUAddNode(LogicalUnit): self.context.AddNode(new_node) +class LUSetNodeParams(LogicalUnit): + """Modifies the parameters of a node. + + """ + HPATH = "node-modify" + HTYPE = constants.HTYPE_NODE + _OP_REQP = ["node_name"] + REQ_BGL = False + + def CheckArguments(self): + node_name = self.cfg.ExpandNodeName(self.op.node_name) + if node_name is None: + raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name) + self.op.node_name = node_name + _CheckBooleanOpField(self.op, 'master_candidate') + _CheckBooleanOpField(self.op, 'offline') + if self.op.master_candidate is None and self.op.offline is None: + raise errors.OpPrereqError("Please pass at least one modification") + if self.op.offline == True and self.op.master_candidate == True: + raise errors.OpPrereqError("Can't set the node into offline and" + " master_candidate at the same time") + + def ExpandNames(self): + self.needed_locks = {locking.LEVEL_NODE: self.op.node_name} + + def BuildHooksEnv(self): + """Build hooks env. + + This runs on the master node. + + """ + env = { + "OP_TARGET": self.op.node_name, + "MASTER_CANDIDATE": str(self.op.master_candidate), + "OFFLINE": str(self.op.offline), + } + nl = [self.cfg.GetMasterNode(), + self.op.node_name] + return env, nl, nl + + def CheckPrereq(self): + """Check prerequisites. + + This only checks the instance list against the existing names. 
+ + """ + node = self.node = self.cfg.GetNodeInfo(self.op.node_name) + + if ((self.op.master_candidate == False or self.op.offline == True) + and node.master_candidate): + # we will demote the node from master_candidate + if self.op.node_name == self.cfg.GetMasterNode(): + raise errors.OpPrereqError("The master node has to be a" + " master candidate and online") + cp_size = self.cfg.GetClusterInfo().candidate_pool_size + num_candidates, _ = self.cfg.GetMasterCandidateStats() + if num_candidates <= cp_size: + msg = ("Not enough master candidates (desired" + " %d, new value will be %d)" % (cp_size, num_candidates-1)) + if self.op.force: + self.LogWarning(msg) + else: + raise errors.OpPrereqError(msg) + + if (self.op.master_candidate == True and node.offline and + not self.op.offline == False): + raise errors.OpPrereqError("Can't set an offline node to" + " master_candidate") + + return + + def Exec(self, feedback_fn): + """Modifies a node. + + """ + node = self.node + + result = [] + + if self.op.offline is not None: + node.offline = self.op.offline + result.append(("offline", str(self.op.offline))) + if self.op.offline == True and node.master_candidate: + node.master_candidate = False + result.append(("master_candidate", "auto-demotion due to offline")) + + if self.op.master_candidate is not None: + node.master_candidate = self.op.master_candidate + result.append(("master_candidate", str(self.op.master_candidate))) + if self.op.master_candidate == False: + rrc = self.rpc.call_node_demote_from_mc(node.name) + if (rrc.failed or not isinstance(rrc.data, (tuple, list)) + or len(rrc.data) != 2): + self.LogWarning("Node rpc error: %s" % rrc.error) + elif not rrc.data[0]: + self.LogWarning("Node failed to demote itself: %s" % rrc.data[1]) + + # this will trigger configuration file update, if needed + self.cfg.Update(node) + # this will trigger job queue propagation or cleanup + if self.op.node_name != self.cfg.GetMasterNode(): + self.context.ReaddNode(node) + + return result + + class LUQueryClusterInfo(NoHooksLU): """Query cluster configuration. @@ -1985,6 +2291,7 @@ class LUQueryClusterInfo(NoHooksLU): "enabled_hypervisors": cluster.enabled_hypervisors, "hvparams": cluster.hvparams, "beparams": cluster.beparams, + "candidate_pool_size": cluster.candidate_pool_size, } return result @@ -2055,6 +2362,7 @@ class LUActivateInstanceDisks(NoHooksLU): self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) assert self.instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + _CheckNodeOnline(self, self.instance.primary_node) def Exec(self, feedback_fn): """Activate the disks. 
@@ -2101,7 +2409,7 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False): for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): lu.cfg.SetDiskID(node_disk, node) result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False) - if not result: + if result.failed or not result: lu.proc.LogWarning("Could not prepare block device %s on node %s" " (is_primary=False, pass=1)", inst_disk.iv_name, node) @@ -2117,12 +2425,12 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False): continue lu.cfg.SetDiskID(node_disk, node) result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True) - if not result: + if result.failed or not result: lu.proc.LogWarning("Could not prepare block device %s on node %s" " (is_primary=True, pass=2)", inst_disk.iv_name, node) disks_ok = False - device_info.append((instance.primary_node, inst_disk.iv_name, result)) + device_info.append((instance.primary_node, inst_disk.iv_name, result.data)) # leave the disks configured for the primary node # this is a workaround that would be fixed better by @@ -2192,11 +2500,11 @@ def _SafeShutdownInstanceDisks(lu, instance): ins_l = lu.rpc.call_instance_list([instance.primary_node], [instance.hypervisor]) ins_l = ins_l[instance.primary_node] - if not type(ins_l) is list: + if ins_l.failed or not isinstance(ins_l.data, list): raise errors.OpExecError("Can't contact node '%s'" % instance.primary_node) - if instance.name in ins_l: + if instance.name in ins_l.data: raise errors.OpExecError("Instance is running, can't shutdown" " block devices.") @@ -2216,7 +2524,8 @@ def _ShutdownInstanceDisks(lu, instance, ignore_primary=False): for disk in instance.disks: for node, top_disk in disk.ComputeNodeTree(instance.primary_node): lu.cfg.SetDiskID(top_disk, node) - if not lu.rpc.call_blockdev_shutdown(node, top_disk): + result = lu.rpc.call_blockdev_shutdown(node, top_disk) + if result.failed or not result.data: logging.error("Could not shutdown block device %s on node %s", disk.iv_name, node) if not ignore_primary or node != instance.primary_node: @@ -2224,7 +2533,7 @@ def _ShutdownInstanceDisks(lu, instance, ignore_primary=False): return result -def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor): +def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name): """Checks if a node has enough free memory. 
This function check if a given node has the needed amount of free @@ -2240,18 +2549,15 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor): @param reason: string to use in the error message @type requested: C{int} @param requested: the amount of memory in MiB to check for - @type hypervisor: C{str} - @param hypervisor: the hypervisor to ask for memory stats + @type hypervisor_name: C{str} + @param hypervisor_name: the hypervisor to ask for memory stats @raise errors.OpPrereqError: if the node doesn't have enough memory, or we cannot check the node """ - nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor) - if not nodeinfo or not isinstance(nodeinfo, dict): - raise errors.OpPrereqError("Could not contact node %s for resource" - " information" % (node,)) - - free_mem = nodeinfo[node].get('memory_free') + nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name) + nodeinfo[node].Raise() + free_mem = nodeinfo[node].data.get('memory_free') if not isinstance(free_mem, int): raise errors.OpPrereqError("Can't compute free memory on node %s, result" " was '%s'" % (node, free_mem)) @@ -2283,8 +2589,7 @@ class LUStartupInstance(LogicalUnit): "FORCE": self.op.force, } env.update(_BuildInstanceHookEnvByObject(self, self.instance)) - nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] + - list(self.instance.secondary_nodes)) + nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl def CheckPrereq(self): @@ -2297,6 +2602,8 @@ class LUStartupInstance(LogicalUnit): assert self.instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + _CheckNodeOnline(self, instance.primary_node) + bep = self.cfg.GetClusterInfo().FillBE(instance) # check bridges existance _CheckInstanceBridgesExist(self, instance) @@ -2319,9 +2626,11 @@ class LUStartupInstance(LogicalUnit): _StartInstanceDisks(self, instance, force) - if not self.rpc.call_instance_start(node_current, instance, extra_args): + result = self.rpc.call_instance_start(node_current, instance, extra_args) + msg = result.RemoteFailMsg() + if msg: _ShutdownInstanceDisks(self, instance) - raise errors.OpExecError("Could not start instance") + raise errors.OpExecError("Could not start instance: %s" % msg) class LURebootInstance(LogicalUnit): @@ -2353,8 +2662,7 @@ class LURebootInstance(LogicalUnit): "IGNORE_SECONDARIES": self.op.ignore_secondaries, } env.update(_BuildInstanceHookEnvByObject(self, self.instance)) - nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] + - list(self.instance.secondary_nodes)) + nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl def CheckPrereq(self): @@ -2367,6 +2675,8 @@ class LURebootInstance(LogicalUnit): assert self.instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + _CheckNodeOnline(self, instance.primary_node) + # check bridges existance _CheckInstanceBridgesExist(self, instance) @@ -2383,17 +2693,21 @@ class LURebootInstance(LogicalUnit): if reboot_type in [constants.INSTANCE_REBOOT_SOFT, constants.INSTANCE_REBOOT_HARD]: - if not self.rpc.call_instance_reboot(node_current, instance, - reboot_type, extra_args): + result = self.rpc.call_instance_reboot(node_current, instance, + reboot_type, extra_args) + if result.failed or not result.data: raise errors.OpExecError("Could not reboot instance") else: if not self.rpc.call_instance_shutdown(node_current, instance): raise errors.OpExecError("could not shutdown instance for full reboot") 
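The renamed _CheckNodeFreeMemory helper now raises through nodeinfo[node].Raise() and reads memory_free from the result payload. The sketch below shows the same arithmetic with the RPC layer stubbed out; the sample node_info dict is hypothetical and the insufficient-memory message is paraphrased, since that branch falls outside the hunks shown here.

# Sketch of the check in _CheckNodeFreeMemory with the RPC layer stubbed out.
class OpPrereqError(Exception):
  pass


def CheckFreeMemorySketch(node, node_info, reason, requested):
  free_mem = node_info.get("memory_free")
  if not isinstance(free_mem, int):
    raise OpPrereqError("Can't compute free memory on node %s, result"
                        " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise OpPrereqError("Not enough memory on node %s for %s:"
                        " needed %d MiB, available %d MiB" %
                        (node, reason, requested, free_mem))


CheckFreeMemorySketch("node1", {"memory_free": 2048},
                      "starting instance inst1.example.com", 512)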
_ShutdownInstanceDisks(self, instance) _StartInstanceDisks(self, instance, ignore_secondaries) - if not self.rpc.call_instance_start(node_current, instance, extra_args): + result = self.rpc.call_instance_start(node_current, instance, extra_args) + msg = result.RemoteFailMsg() + if msg: _ShutdownInstanceDisks(self, instance) - raise errors.OpExecError("Could not start instance for full reboot") + raise errors.OpExecError("Could not start instance for" + " full reboot: %s" % msg) self.cfg.MarkInstanceUp(instance.name) @@ -2417,8 +2731,7 @@ class LUShutdownInstance(LogicalUnit): """ env = _BuildInstanceHookEnvByObject(self, self.instance) - nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] + - list(self.instance.secondary_nodes)) + nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl def CheckPrereq(self): @@ -2430,6 +2743,7 @@ class LUShutdownInstance(LogicalUnit): self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) assert self.instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + _CheckNodeOnline(self, self.instance.primary_node) def Exec(self, feedback_fn): """Shutdown the instance. @@ -2438,7 +2752,8 @@ class LUShutdownInstance(LogicalUnit): instance = self.instance node_current = instance.primary_node self.cfg.MarkInstanceDown(instance.name) - if not self.rpc.call_instance_shutdown(node_current, instance): + result = self.rpc.call_instance_shutdown(node_current, instance) + if result.failed or not result.data: self.proc.LogWarning("Could not shutdown instance") _ShutdownInstanceDisks(self, instance) @@ -2463,8 +2778,7 @@ class LUReinstallInstance(LogicalUnit): """ env = _BuildInstanceHookEnvByObject(self, self.instance) - nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] + - list(self.instance.secondary_nodes)) + nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl def CheckPrereq(self): @@ -2476,6 +2790,7 @@ class LUReinstallInstance(LogicalUnit): instance = self.cfg.GetInstanceInfo(self.op.instance_name) assert instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + _CheckNodeOnline(self, instance.primary_node) if instance.disk_template == constants.DT_DISKLESS: raise errors.OpPrereqError("Instance '%s' has no disks" % @@ -2486,7 +2801,7 @@ class LUReinstallInstance(LogicalUnit): remote_info = self.rpc.call_instance_info(instance.primary_node, instance.name, instance.hypervisor) - if remote_info: + if remote_info.failed or remote_info.data: raise errors.OpPrereqError("Instance '%s' is running on the node %s" % (self.op.instance_name, instance.primary_node)) @@ -2499,8 +2814,9 @@ class LUReinstallInstance(LogicalUnit): if pnode is None: raise errors.OpPrereqError("Primary node '%s' is unknown" % self.op.pnode) - os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type) - if not os_obj: + result = self.rpc.call_os_get(pnode.name, self.op.os_type) + result.Raise() + if not isinstance(result.data, objects.OS): raise errors.OpPrereqError("OS '%s' not in supported OS list for" " primary node" % self.op.os_type) @@ -2520,10 +2836,12 @@ class LUReinstallInstance(LogicalUnit): _StartInstanceDisks(self, inst, None) try: feedback_fn("Running the instance OS create scripts...") - if not self.rpc.call_instance_os_add(inst.primary_node, inst): + result = self.rpc.call_instance_os_add(inst.primary_node, inst) + msg = result.RemoteFailMsg() + if msg: raise errors.OpExecError("Could not install OS for instance %s" - " on node %s" % - (inst.name, 
inst.primary_node)) + " on node %s: %s" % + (inst.name, inst.primary_node, msg)) finally: _ShutdownInstanceDisks(self, inst) @@ -2544,8 +2862,7 @@ class LURenameInstance(LogicalUnit): """ env = _BuildInstanceHookEnvByObject(self, self.instance) env["INSTANCE_NEW_NAME"] = self.op.new_name - nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] + - list(self.instance.secondary_nodes)) + nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl def CheckPrereq(self): @@ -2559,13 +2876,16 @@ class LURenameInstance(LogicalUnit): if instance is None: raise errors.OpPrereqError("Instance '%s' not known" % self.op.instance_name) + _CheckNodeOnline(self, instance.primary_node) + if instance.status != "down": raise errors.OpPrereqError("Instance '%s' is marked to be up" % self.op.instance_name) remote_info = self.rpc.call_instance_info(instance.primary_node, instance.name, instance.hypervisor) - if remote_info: + remote_info.Raise() + if remote_info.data: raise errors.OpPrereqError("Instance '%s' is running on the node %s" % (self.op.instance_name, instance.primary_node)) @@ -2609,15 +2929,15 @@ class LURenameInstance(LogicalUnit): result = self.rpc.call_file_storage_dir_rename(inst.primary_node, old_file_storage_dir, new_file_storage_dir) - - if not result: + result.Raise() + if not result.data: raise errors.OpExecError("Could not connect to node '%s' to rename" " directory '%s' to '%s' (but the instance" " has been renamed in Ganeti)" % ( inst.primary_node, old_file_storage_dir, new_file_storage_dir)) - if not result[0]: + if not result.data[0]: raise errors.OpExecError("Could not rename directory '%s' to '%s'" " (but the instance has been renamed in" " Ganeti)" % (old_file_storage_dir, @@ -2625,8 +2945,9 @@ class LURenameInstance(LogicalUnit): _StartInstanceDisks(self, inst, None) try: - if not self.rpc.call_instance_run_rename(inst.primary_node, inst, - old_name): + result = self.rpc.call_instance_run_rename(inst.primary_node, inst, + old_name) + if result.failed or not result.data: msg = ("Could not run OS rename script for instance %s on node %s" " (but the instance has been renamed in Ganeti)" % (inst.name, inst.primary_node)) @@ -2681,7 +3002,8 @@ class LURemoveInstance(LogicalUnit): logging.info("Shutting down instance %s on node %s", instance.name, instance.primary_node) - if not self.rpc.call_instance_shutdown(instance.primary_node, instance): + result = self.rpc.call_instance_shutdown(instance.primary_node, instance) + if result.failed or not result.data: if self.op.ignore_failures: feedback_fn("Warning: can't shutdown instance") else: @@ -2782,16 +3104,21 @@ class LUQueryInstances(NoHooksLU): hv_list = list(set([inst.hypervisor for inst in instance_list])) bad_nodes = [] + off_nodes = [] if self.do_locking: live_data = {} node_data = self.rpc.call_all_instances_info(nodes, hv_list) for name in nodes: result = node_data[name] - if result: - live_data.update(result) - elif result == False: + if result.offline: + # offline nodes will be in both lists + off_nodes.append(name) + if result.failed: bad_nodes.append(name) - # else no instance is alive + else: + if result.data: + live_data.update(result.data) + # else no instance is alive else: live_data = dict([(name, {}) for name in instance_names]) @@ -2822,7 +3149,9 @@ class LUQueryInstances(NoHooksLU): else: val = bool(live_data.get(instance.name)) elif field == "status": - if instance.primary_node in bad_nodes: + if instance.primary_node in off_nodes: + val = "ERROR_nodeoffline" + elif instance.primary_node 
in bad_nodes: val = "ERROR_nodedown" else: running = bool(live_data.get(instance.name)) @@ -2975,6 +3304,7 @@ class LUFailoverInstance(LogicalUnit): "a mirrored disk template") target_node = secondary_nodes[0] + _CheckNodeOnline(self, target_node) # check memory requirements on the secondary node _CheckNodeFreeMemory(self, target_node, "failing over instance %s" % instance.name, bep[constants.BE_MEMORY], @@ -2982,7 +3312,9 @@ class LUFailoverInstance(LogicalUnit): # check bridge existance brlist = [nic.bridge for nic in instance.nics] - if not self.rpc.call_bridges_exist(target_node, brlist): + result = self.rpc.call_bridges_exist(target_node, brlist) + result.Raise() + if not result.data: raise errors.OpPrereqError("One or more target bridges %s does not" " exist on destination node '%s'" % (brlist, target_node)) @@ -3011,7 +3343,8 @@ class LUFailoverInstance(LogicalUnit): logging.info("Shutting down instance %s on node %s", instance.name, source_node) - if not self.rpc.call_instance_shutdown(source_node, instance): + result = self.rpc.call_instance_shutdown(source_node, instance) + if result.failed or not result.data: if self.op.ignore_consistency: self.proc.LogWarning("Could not shutdown instance %s on node %s." " Proceeding" @@ -3042,60 +3375,391 @@ class LUFailoverInstance(LogicalUnit): raise errors.OpExecError("Can't activate the instance's disks") feedback_fn("* starting the instance on the target node") - if not self.rpc.call_instance_start(target_node, instance, None): + result = self.rpc.call_instance_start(target_node, instance, None) + msg = result.RemoteFailMsg() + if msg: _ShutdownInstanceDisks(self, instance) - raise errors.OpExecError("Could not start instance %s on node %s." % - (instance.name, target_node)) + raise errors.OpExecError("Could not start instance %s on node %s: %s" % + (instance.name, target_node, msg)) -def _CreateBlockDevOnPrimary(lu, node, instance, device, info): - """Create a tree of block devices on the primary node. +class LUMigrateInstance(LogicalUnit): + """Migrate an instance. - This always creates all devices. + This is migration without shutting down, compared to the failover, + which is done with shutdown. """ - if device.children: - for child in device.children: - if not _CreateBlockDevOnPrimary(lu, node, instance, child, info): - return False + HPATH = "instance-migrate" + HTYPE = constants.HTYPE_INSTANCE + _OP_REQP = ["instance_name", "live", "cleanup"] - lu.cfg.SetDiskID(device, node) - new_id = lu.rpc.call_blockdev_create(node, device, device.size, - instance.name, True, info) - if not new_id: - return False - if device.physical_id is None: - device.physical_id = new_id - return True + REQ_BGL = False + + def ExpandNames(self): + self._ExpandAndLockInstance() + self.needed_locks[locking.LEVEL_NODE] = [] + self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE + + def DeclareLocks(self, level): + if level == locking.LEVEL_NODE: + self._LockInstancesNodes() + + def BuildHooksEnv(self): + """Build hooks env. + + This runs on master, primary and secondary nodes of the instance. + + """ + env = _BuildInstanceHookEnvByObject(self, self.instance) + nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes) + return env, nl, nl + + def CheckPrereq(self): + """Check prerequisites. + + This checks that the instance is in the cluster. 
+ + """ + instance = self.cfg.GetInstanceInfo( + self.cfg.ExpandInstanceName(self.op.instance_name)) + if instance is None: + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + + if instance.disk_template != constants.DT_DRBD8: + raise errors.OpPrereqError("Instance's disk layout is not" + " drbd8, cannot migrate.") + + secondary_nodes = instance.secondary_nodes + if not secondary_nodes: + raise errors.ProgrammerError("no secondary node but using " + "drbd8 disk template") + + i_be = self.cfg.GetClusterInfo().FillBE(instance) + + target_node = secondary_nodes[0] + # check memory requirements on the secondary node + _CheckNodeFreeMemory(self, target_node, "migrating instance %s" % + instance.name, i_be[constants.BE_MEMORY], + instance.hypervisor) + + # check bridge existance + brlist = [nic.bridge for nic in instance.nics] + result = self.rpc.call_bridges_exist(target_node, brlist) + if result.failed or not result.data: + raise errors.OpPrereqError("One or more target bridges %s does not" + " exist on destination node '%s'" % + (brlist, target_node)) + + if not self.op.cleanup: + result = self.rpc.call_instance_migratable(instance.primary_node, + instance) + msg = result.RemoteFailMsg() + if msg: + raise errors.OpPrereqError("Can't migrate: %s - please use failover" % + msg) + + self.instance = instance + + def _WaitUntilSync(self): + """Poll with custom rpc for disk sync. + + This uses our own step-based rpc call. + + """ + self.feedback_fn("* wait until resync is done") + all_done = False + while not all_done: + all_done = True + result = self.rpc.call_drbd_wait_sync(self.all_nodes, + self.nodes_ip, + self.instance.disks) + min_percent = 100 + for node, nres in result.items(): + msg = nres.RemoteFailMsg() + if msg: + raise errors.OpExecError("Cannot resync disks on node %s: %s" % + (node, msg)) + node_done, node_percent = nres.data[1] + all_done = all_done and node_done + if node_percent is not None: + min_percent = min(min_percent, node_percent) + if not all_done: + if min_percent < 100: + self.feedback_fn(" - progress: %.1f%%" % min_percent) + time.sleep(2) + + def _EnsureSecondary(self, node): + """Demote a node to secondary. + + """ + self.feedback_fn("* switching node %s to secondary mode" % node) + + for dev in self.instance.disks: + self.cfg.SetDiskID(dev, node) + + result = self.rpc.call_blockdev_close(node, self.instance.name, + self.instance.disks) + msg = result.RemoteFailMsg() + if msg: + raise errors.OpExecError("Cannot change disk to secondary on node %s," + " error %s" % (node, msg)) + + def _GoStandalone(self): + """Disconnect from the network. + + """ + self.feedback_fn("* changing into standalone mode") + result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip, + self.instance.disks) + for node, nres in result.items(): + msg = nres.RemoteFailMsg() + if msg: + raise errors.OpExecError("Cannot disconnect disks node %s," + " error %s" % (node, msg)) + + def _GoReconnect(self, multimaster): + """Reconnect to the network. + + """ + if multimaster: + msg = "dual-master" + else: + msg = "single-master" + self.feedback_fn("* changing disks into %s mode" % msg) + result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip, + self.instance.disks, + self.instance.name, multimaster) + for node, nres in result.items(): + msg = nres.RemoteFailMsg() + if msg: + raise errors.OpExecError("Cannot change disks config on node %s," + " error: %s" % (node, msg)) + + def _ExecCleanup(self): + """Try to cleanup after a failed migration. 
+ + The cleanup is done by: + - check that the instance is running only on one node + (and update the config if needed) + - change disks on its secondary node to secondary + - wait until disks are fully synchronized + - disconnect from the network + - change disks into single-master mode + - wait again until disks are fully synchronized + + """ + instance = self.instance + target_node = self.target_node + source_node = self.source_node + + # check running on only one node + self.feedback_fn("* checking where the instance actually runs" + " (if this hangs, the hypervisor might be in" + " a bad state)") + ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor]) + for node, result in ins_l.items(): + result.Raise() + if not isinstance(result.data, list): + raise errors.OpExecError("Can't contact node '%s'" % node) + + runningon_source = instance.name in ins_l[source_node].data + runningon_target = instance.name in ins_l[target_node].data + + if runningon_source and runningon_target: + raise errors.OpExecError("Instance seems to be running on two nodes," + " or the hypervisor is confused. You will have" + " to ensure manually that it runs only on one" + " and restart this operation.") + + if not (runningon_source or runningon_target): + raise errors.OpExecError("Instance does not seem to be running at all." + " In this case, it's safer to repair by" + " running 'gnt-instance stop' to ensure disk" + " shutdown, and then restarting it.") + + if runningon_target: + # the migration has actually succeeded, we need to update the config + self.feedback_fn("* instance running on secondary node (%s)," + " updating config" % target_node) + instance.primary_node = target_node + self.cfg.Update(instance) + demoted_node = source_node + else: + self.feedback_fn("* instance confirmed to be running on its" + " primary node (%s)" % source_node) + demoted_node = target_node + + self._EnsureSecondary(demoted_node) + try: + self._WaitUntilSync() + except errors.OpExecError: + # we ignore here errors, since if the device is standalone, it + # won't be able to sync + pass + self._GoStandalone() + self._GoReconnect(False) + self._WaitUntilSync() + + self.feedback_fn("* done") + + def _ExecMigration(self): + """Migrate an instance. + + The migrate is done by: + - change the disks into dual-master mode + - wait until disks are fully synchronized again + - migrate the instance + - change disks on the new secondary node (the old primary) to secondary + - wait until disks are fully synchronized + - change disks into single-master mode + + """ + instance = self.instance + target_node = self.target_node + source_node = self.source_node + + self.feedback_fn("* checking disk consistency between source and target") + for dev in instance.disks: + if not _CheckDiskConsistency(self, dev, target_node, False): + raise errors.OpExecError("Disk %s is degraded or not fully" + " synchronized on target node," + " aborting migrate." 
% dev.iv_name) + + self._EnsureSecondary(target_node) + self._GoStandalone() + self._GoReconnect(True) + self._WaitUntilSync() + + self.feedback_fn("* migrating instance to %s" % target_node) + time.sleep(10) + result = self.rpc.call_instance_migrate(source_node, instance, + self.nodes_ip[target_node], + self.op.live) + msg = result.RemoteFailMsg() + if msg: + logging.error("Instance migration failed, trying to revert" + " disk status: %s", msg) + try: + self._EnsureSecondary(target_node) + self._GoStandalone() + self._GoReconnect(False) + self._WaitUntilSync() + except errors.OpExecError, err: + self.LogWarning("Migration failed and I can't reconnect the" + " drives: error '%s'\n" + "Please look and recover the instance status" % + str(err)) + + raise errors.OpExecError("Could not migrate instance %s: %s" % + (instance.name, msg)) + time.sleep(10) + + instance.primary_node = target_node + # distribute new instance config to the other nodes + self.cfg.Update(instance) + + self._EnsureSecondary(source_node) + self._WaitUntilSync() + self._GoStandalone() + self._GoReconnect(False) + self._WaitUntilSync() + + self.feedback_fn("* done") + + def Exec(self, feedback_fn): + """Perform the migration. + + """ + self.feedback_fn = feedback_fn + + self.source_node = self.instance.primary_node + self.target_node = self.instance.secondary_nodes[0] + self.all_nodes = [self.source_node, self.target_node] + self.nodes_ip = { + self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip, + self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip, + } + if self.op.cleanup: + return self._ExecCleanup() + else: + return self._ExecMigration() -def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info): - """Create a tree of block devices on a secondary node. +def _CreateBlockDev(lu, node, instance, device, force_create, + info, force_open): + """Create a tree of block devices on a given node. If this device type has to be created on secondaries, create it and all its children. If not, just recurse to children keeping the same 'force' value. + @param lu: the lu on whose behalf we execute + @param node: the node on which to create the device + @type instance: L{objects.Instance} + @param instance: the instance which owns the device + @type device: L{objects.Disk} + @param device: the device to create + @type force_create: boolean + @param force_create: whether to force creation of this device; this + will be change to True whenever we find a device which has + CreateOnSecondary() attribute + @param info: the extra 'metadata' we should attach to the device + (this will be represented as a LVM tag) + @type force_open: boolean + @param force_open: this parameter will be passes to the + L{backend.CreateBlockDevice} function where it specifies + whether we run on primary or not, and it affects both + the child assembly and the device own Open() execution + """ if device.CreateOnSecondary(): - force = True + force_create = True + if device.children: for child in device.children: - if not _CreateBlockDevOnSecondary(lu, node, instance, - child, force, info): - return False + _CreateBlockDev(lu, node, instance, child, force_create, + info, force_open) - if not force: - return True + if not force_create: + return + + _CreateSingleBlockDev(lu, node, instance, device, info, force_open) + + +def _CreateSingleBlockDev(lu, node, instance, device, info, force_open): + """Create a single block device on a given node. 
+ + This will not recurse over children of the device, so they must be + created in advance. + + @param lu: the lu on whose behalf we execute + @param node: the node on which to create the device + @type instance: L{objects.Instance} + @param instance: the instance which owns the device + @type device: L{objects.Disk} + @param device: the device to create + @param info: the extra 'metadata' we should attach to the device + (this will be represented as a LVM tag) + @type force_open: boolean + @param force_open: this parameter will be passes to the + L{backend.CreateBlockDevice} function where it specifies + whether we run on primary or not, and it affects both + the child assembly and the device own Open() execution + + """ lu.cfg.SetDiskID(device, node) - new_id = lu.rpc.call_blockdev_create(node, device, device.size, - instance.name, False, info) - if not new_id: - return False + result = lu.rpc.call_blockdev_create(node, device, device.size, + instance.name, force_open, info) + msg = result.RemoteFailMsg() + if msg: + raise errors.OpExecError("Can't create block device %s on" + " node %s for instance %s: %s" % + (device, node, instance.name, msg)) if device.physical_id is None: - device.physical_id = new_id - return True + device.physical_id = result.data[1] def _GenerateUniqueNames(lu, exts): @@ -3166,11 +3830,11 @@ def _GenerateDiskTemplate(lu, template_name, minors = lu.cfg.AllocateDRBDMinor( [primary_node, remote_node] * len(disk_info), instance_name) - names = _GenerateUniqueNames(lu, - [".disk%d_%s" % (i, s) - for i in range(disk_count) - for s in ("data", "meta") - ]) + names = [] + for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i + for i in range(disk_count)]): + names.append(lv_prefix + "_data") + names.append(lv_prefix + "_meta") for idx, disk in enumerate(disk_info): disk_index = idx + base_index disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node, @@ -3216,19 +3880,18 @@ def _CreateDisks(lu, instance): """ info = _GetInstanceInfoText(instance) + pnode = instance.primary_node if instance.disk_template == constants.DT_FILE: file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) - result = lu.rpc.call_file_storage_dir_create(instance.primary_node, - file_storage_dir) + result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir) - if not result: - logging.error("Could not connect to node '%s'", instance.primary_node) - return False + if result.failed or not result.data: + raise errors.OpExecError("Could not connect to node '%s'" % pnode) - if not result[0]: - logging.error("Failed to create directory '%s'", file_storage_dir) - return False + if not result.data[0]: + raise errors.OpExecError("Failed to create directory '%s'" % + file_storage_dir) # Note: this needs to be kept in sync with adding of disks in # LUSetInstanceParams @@ -3236,19 +3899,9 @@ def _CreateDisks(lu, instance): logging.info("Creating volume %s for instance %s", device.iv_name, instance.name) #HARDCODE - for secondary_node in instance.secondary_nodes: - if not _CreateBlockDevOnSecondary(lu, secondary_node, instance, - device, False, info): - logging.error("Failed to create volume %s (%s) on secondary node %s!", - device.iv_name, device, secondary_node) - return False - #HARDCODE - if not _CreateBlockDevOnPrimary(lu, instance.primary_node, - instance, device, info): - logging.error("Failed to create volume %s on primary!", device.iv_name) - return False - - return True + for node in instance.all_nodes: + f_create = node == pnode + _CreateBlockDev(lu, node, instance, 
device, f_create, info, f_create) def _RemoveDisks(lu, instance): @@ -3273,15 +3926,17 @@ def _RemoveDisks(lu, instance): for device in instance.disks: for node, disk in device.ComputeNodeTree(instance.primary_node): lu.cfg.SetDiskID(disk, node) - if not lu.rpc.call_blockdev_remove(node, disk): + result = lu.rpc.call_blockdev_remove(node, disk) + if result.failed or not result.data: lu.proc.LogWarning("Could not remove block device %s on node %s," " continuing anyway", device.iv_name, node) result = False if instance.disk_template == constants.DT_FILE: file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) - if not lu.rpc.call_file_storage_dir_remove(instance.primary_node, - file_storage_dir): + result = lu.rpc.call_file_storage_dir_remove(instance.primary_node, + file_storage_dir) + if result.failed or not result.data: logging.error("Could not remove directory '%s'", file_storage_dir) result = False @@ -3329,13 +3984,14 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams): hvname, hvparams) for node in nodenames: - info = hvinfo.get(node, None) - if not info or not isinstance(info, (tuple, list)): + info = hvinfo[node] + info.Raise() + if not info.data or not isinstance(info.data, (tuple, list)): raise errors.OpPrereqError("Cannot get current information" - " from node '%s' (%s)" % (node, info)) - if not info[0]: + " from node '%s' (%s)" % (node, info.data)) + if not info.data[0]: raise errors.OpPrereqError("Hypervisor parameter validation failed:" - " %s" % info[1]) + " %s" % info.data[1]) class LUCreateInstance(LogicalUnit): @@ -3402,6 +4058,7 @@ class LUCreateInstance(LogicalUnit): hv_type.CheckParameterSyntax(filled_hvp) # fill and remember the beparams dict + utils.CheckBEParams(self.op.beparams) self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams) @@ -3492,16 +4149,22 @@ class LUCreateInstance(LogicalUnit): src_node = getattr(self.op, "src_node", None) src_path = getattr(self.op, "src_path", None) - if src_node is None or src_path is None: - raise errors.OpPrereqError("Importing an instance requires source" - " node and path options") + if src_path is None: + self.op.src_path = src_path = self.op.instance_name - if not os.path.isabs(src_path): - raise errors.OpPrereqError("The source path must be absolute") - - self.op.src_node = src_node = self._ExpandNode(src_node) - if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: - self.needed_locks[locking.LEVEL_NODE].append(src_node) + if src_node is None: + self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET + self.op.src_node = None + if os.path.isabs(src_path): + raise errors.OpPrereqError("Importing an instance from an absolute" + " path requires a source node option.") + else: + self.op.src_node = src_node = self._ExpandNode(src_node) + if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: + self.needed_locks[locking.LEVEL_NODE].append(src_node) + if not os.path.isabs(src_path): + self.op.src_path = src_path = \ + os.path.join(constants.EXPORT_DIR, src_path) else: # INSTANCE_CREATE if getattr(self.op, "os_type", None) is None: @@ -3588,11 +4251,28 @@ class LUCreateInstance(LogicalUnit): src_node = self.op.src_node src_path = self.op.src_path - export_info = self.rpc.call_export_info(src_node, src_path) - - if not export_info: + if src_node is None: + exp_list = self.rpc.call_export_list( + self.acquired_locks[locking.LEVEL_NODE]) + found = False + for node in exp_list: + if not exp_list[node].failed and src_path in exp_list[node].data: + found = True + 
self.op.src_node = src_node = node + self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR, + src_path) + break + if not found: + raise errors.OpPrereqError("No export found for relative path %s" % + src_path) + + _CheckNodeOnline(self, src_node) + result = self.rpc.call_export_info(src_node, src_path) + result.Raise() + if not result.data: raise errors.OpPrereqError("No export found in dir %s" % src_path) + export_info = result.data if not export_info.has_section(constants.INISECT_EXP): raise errors.ProgrammerError("Corrupted export config") @@ -3653,6 +4333,10 @@ class LUCreateInstance(LogicalUnit): self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode) assert self.pnode is not None, \ "Cannot retrieve locked node %s" % self.op.pnode + if pnode.offline: + raise errors.OpPrereqError("Cannot use offline primary node '%s'" % + pnode.name) + self.secondaries = [] # mirror node verification @@ -3664,6 +4348,7 @@ class LUCreateInstance(LogicalUnit): raise errors.OpPrereqError("The secondary node cannot be" " the primary node.") self.secondaries.append(self.op.snode) + _CheckNodeOnline(self, self.op.snode) nodenames = [pnode.name] + self.secondaries @@ -3675,7 +4360,9 @@ class LUCreateInstance(LogicalUnit): nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(), self.op.hypervisor) for node in nodenames: - info = nodeinfo.get(node, None) + info = nodeinfo[node] + info.Raise() + info = info.data if not info: raise errors.OpPrereqError("Cannot get current information" " from node '%s'" % node) @@ -3691,17 +4378,19 @@ class LUCreateInstance(LogicalUnit): _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams) # os verification - os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type) - if not os_obj: + result = self.rpc.call_os_get(pnode.name, self.op.os_type) + result.Raise() + if not isinstance(result.data, objects.OS): raise errors.OpPrereqError("OS '%s' not in supported os list for" " primary node" % self.op.os_type) # bridge check on primary node bridges = [n.bridge for n in self.nics] - if not self.rpc.call_bridges_exist(self.pnode.name, bridges): - raise errors.OpPrereqError("one of the target bridges '%s' does not" - " exist on" - " destination node '%s'" % + result = self.rpc.call_bridges_exist(self.pnode.name, bridges) + result.Raise() + if not result.data: + raise errors.OpPrereqError("One of the target bridges '%s' does not" + " exist on destination node '%s'" % (",".join(bridges), pnode.name)) # memory check on primary node @@ -3769,10 +4458,15 @@ class LUCreateInstance(LogicalUnit): ) feedback_fn("* creating instance disks...") - if not _CreateDisks(self, iobj): - _RemoveDisks(self, iobj) - self.cfg.ReleaseDRBDMinors(instance) - raise errors.OpExecError("Device creation failed, reverting...") + try: + _CreateDisks(self, iobj) + except errors.OpExecError: + self.LogWarning("Device creation failed, reverting...") + try: + _RemoveDisks(self, iobj) + finally: + self.cfg.ReleaseDRBDMinors(instance) + raise feedback_fn("adding instance %s to cluster config" % instance) @@ -3783,8 +4477,15 @@ class LUCreateInstance(LogicalUnit): # Remove the temp. 
assignements for the instance's drbds self.cfg.ReleaseDRBDMinors(instance) # Unlock all the nodes - self.context.glm.release(locking.LEVEL_NODE) - del self.acquired_locks[locking.LEVEL_NODE] + if self.op.mode == constants.INSTANCE_IMPORT: + nodes_keep = [self.op.src_node] + nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE] + if node != self.op.src_node] + self.context.glm.release(locking.LEVEL_NODE, nodes_release) + self.acquired_locks[locking.LEVEL_NODE] = nodes_keep + else: + self.context.glm.release(locking.LEVEL_NODE) + del self.acquired_locks[locking.LEVEL_NODE] if self.op.wait_for_sync: disk_abort = not _WaitForSync(self, iobj) @@ -3810,10 +4511,12 @@ class LUCreateInstance(LogicalUnit): if iobj.disk_template != constants.DT_DISKLESS: if self.op.mode == constants.INSTANCE_CREATE: feedback_fn("* running the instance OS create scripts...") - if not self.rpc.call_instance_os_add(pnode_name, iobj): - raise errors.OpExecError("could not add os for instance %s" - " on node %s" % - (instance, pnode_name)) + result = self.rpc.call_instance_os_add(pnode_name, iobj) + msg = result.RemoteFailMsg() + if msg: + raise errors.OpExecError("Could not add os for instance %s" + " on node %s: %s" % + (instance, pnode_name, msg)) elif self.op.mode == constants.INSTANCE_IMPORT: feedback_fn("* running the instance OS import scripts...") @@ -3823,7 +4526,8 @@ class LUCreateInstance(LogicalUnit): import_result = self.rpc.call_instance_os_import(pnode_name, iobj, src_node, src_images, cluster_name) - for idx, result in enumerate(import_result): + import_result.Raise() + for idx, result in enumerate(import_result.data): if not result: self.LogWarning("Could not import the image %s for instance" " %s, disk %d, on node %s" % @@ -3836,8 +4540,10 @@ class LUCreateInstance(LogicalUnit): if self.op.start: logging.info("Starting instance %s on node %s", instance, pnode_name) feedback_fn("* starting instance...") - if not self.rpc.call_instance_start(pnode_name, iobj, None): - raise errors.OpExecError("Could not start instance") + result = self.rpc.call_instance_start(pnode_name, iobj, None) + msg = result.RemoteFailMsg() + if msg: + raise errors.OpExecError("Could not start instance: %s" % msg) class LUConnectConsole(NoHooksLU): @@ -3863,6 +4569,7 @@ class LUConnectConsole(NoHooksLU): self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) assert self.instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + _CheckNodeOnline(self, self.instance.primary_node) def Exec(self, feedback_fn): """Connect to the console of an instance @@ -3873,10 +4580,9 @@ class LUConnectConsole(NoHooksLU): node_insts = self.rpc.call_instance_list([node], [instance.hypervisor])[node] - if node_insts is False: - raise errors.OpExecError("Can't connect to node %s." % node) + node_insts.Raise() - if instance.name not in node_insts: + if instance.name not in node_insts.data: raise errors.OpExecError("Instance %s is not running." 
% instance.name) logging.debug("Connecting to console of %s on %s", instance.name, node) @@ -3897,17 +4603,32 @@ class LUReplaceDisks(LogicalUnit): _OP_REQP = ["instance_name", "mode", "disks"] REQ_BGL = False - def ExpandNames(self): - self._ExpandAndLockInstance() - + def CheckArguments(self): if not hasattr(self.op, "remote_node"): self.op.remote_node = None - - ia_name = getattr(self.op, "iallocator", None) - if ia_name is not None: - if self.op.remote_node is not None: + if not hasattr(self.op, "iallocator"): + self.op.iallocator = None + + # check for valid parameter combination + cnt = [self.op.remote_node, self.op.iallocator].count(None) + if self.op.mode == constants.REPLACE_DISK_CHG: + if cnt == 2: + raise errors.OpPrereqError("When changing the secondary either an" + " iallocator script must be used or the" + " new node given") + elif cnt == 0: raise errors.OpPrereqError("Give either the iallocator or the new" " secondary, not both") + else: # not replacing the secondary + if cnt != 2: + raise errors.OpPrereqError("The iallocator and new node options can" + " be used only when changing the" + " secondary node") + + def ExpandNames(self): + self._ExpandAndLockInstance() + + if self.op.iallocator is not None: self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET elif self.op.remote_node is not None: remote_node = self.cfg.ExpandNodeName(self.op.remote_node) @@ -3982,9 +4703,9 @@ class LUReplaceDisks(LogicalUnit): "Cannot retrieve locked instance %s" % self.op.instance_name self.instance = instance - if instance.disk_template not in constants.DTS_NET_MIRROR: - raise errors.OpPrereqError("Instance's disk layout is not" - " network mirrored.") + if instance.disk_template != constants.DT_DRBD8: + raise errors.OpPrereqError("Can only run replace disks for DRBD8-based" + " instances") if len(instance.secondary_nodes) != 1: raise errors.OpPrereqError("The instance has a strange layout," @@ -3993,8 +4714,7 @@ class LUReplaceDisks(LogicalUnit): self.sec_node = instance.secondary_nodes[0] - ia_name = getattr(self.op, "iallocator", None) - if ia_name is not None: + if self.op.iallocator is not None: self._RunAllocator() remote_node = self.op.remote_node @@ -4008,35 +4728,24 @@ class LUReplaceDisks(LogicalUnit): raise errors.OpPrereqError("The specified node is the primary node of" " the instance.") elif remote_node == self.sec_node: - if self.op.mode == constants.REPLACE_DISK_SEC: - # this is for DRBD8, where we can't execute the same mode of - # replacement as for drbd7 (no different port allocated) - raise errors.OpPrereqError("Same secondary given, cannot execute" - " replacement") - if instance.disk_template == constants.DT_DRBD8: - if (self.op.mode == constants.REPLACE_DISK_ALL and - remote_node is not None): - # switch to replace secondary mode - self.op.mode = constants.REPLACE_DISK_SEC - - if self.op.mode == constants.REPLACE_DISK_ALL: - raise errors.OpPrereqError("Template 'drbd' only allows primary or" - " secondary disk replacement, not" - " both at once") - elif self.op.mode == constants.REPLACE_DISK_PRI: - if remote_node is not None: - raise errors.OpPrereqError("Template 'drbd' does not allow changing" - " the secondary while doing a primary" - " node disk replacement") - self.tgt_node = instance.primary_node - self.oth_node = instance.secondary_nodes[0] - elif self.op.mode == constants.REPLACE_DISK_SEC: - self.new_node = remote_node # this can be None, in which case - # we don't change the secondary - self.tgt_node = instance.secondary_nodes[0] - self.oth_node = 
instance.primary_node - else: - raise errors.ProgrammerError("Unhandled disk replace mode") + raise errors.OpPrereqError("The specified node is already the" + " secondary node of the instance.") + + if self.op.mode == constants.REPLACE_DISK_PRI: + n1 = self.tgt_node = instance.primary_node + n2 = self.oth_node = self.sec_node + elif self.op.mode == constants.REPLACE_DISK_SEC: + n1 = self.tgt_node = self.sec_node + n2 = self.oth_node = instance.primary_node + elif self.op.mode == constants.REPLACE_DISK_CHG: + n1 = self.new_node = remote_node + n2 = self.oth_node = instance.primary_node + self.tgt_node = self.sec_node + else: + raise errors.ProgrammerError("Unhandled disk replace mode") + + _CheckNodeOnline(self, n1) + _CheckNodeOnline(self, n2) if not self.op.disks: self.op.disks = range(len(instance.disks)) @@ -4084,8 +4793,8 @@ class LUReplaceDisks(LogicalUnit): if not results: raise errors.OpExecError("Can't list volume groups on the nodes") for node in oth_node, tgt_node: - res = results.get(node, False) - if not res or my_vg not in res: + res = results[node] + if res.failed or not res.data or my_vg not in res.data: raise errors.OpExecError("Volume group '%s' not found on %s" % (my_vg, node)) for idx, dev in enumerate(instance.disks): @@ -4129,21 +4838,18 @@ class LUReplaceDisks(LogicalUnit): iv_names[dev.iv_name] = (dev, old_lvs, new_lvs) info("creating new local storage on %s for %s" % (tgt_node, dev.iv_name)) - # since we *always* want to create this LV, we use the - # _Create...OnPrimary (which forces the creation), even if we - # are talking about the secondary node + # we pass force_create=True to force the LVM creation for new_lv in new_lvs: - if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv, - _GetInstanceInfoText(instance)): - raise errors.OpExecError("Failed to create new LV named '%s' on" - " node '%s'" % - (new_lv.logical_id[1], tgt_node)) + _CreateBlockDev(self, tgt_node, instance, new_lv, True, + _GetInstanceInfoText(instance), False) # Step: for each lv, detach+rename*2+attach self.proc.LogStep(4, steps_total, "change drbd configuration") for dev, old_lvs, new_lvs in iv_names.itervalues(): info("detaching %s drbd from local storage" % dev.iv_name) - if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs): + result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs) + result.Raise() + if not result.data: raise errors.OpExecError("Can't detach drbd from local storage on node" " %s for device %s" % (tgt_node, dev.iv_name)) #dev.children = [] @@ -4163,16 +4869,20 @@ class LUReplaceDisks(LogicalUnit): rlist = [] for to_ren in old_lvs: find_res = self.rpc.call_blockdev_find(tgt_node, to_ren) - if find_res is not None: # device exists + if not find_res.failed and find_res.data is not None: # device exists rlist.append((to_ren, ren_fn(to_ren, temp_suffix))) info("renaming the old LVs on the target node") - if not self.rpc.call_blockdev_rename(tgt_node, rlist): + result = self.rpc.call_blockdev_rename(tgt_node, rlist) + result.Raise() + if not result.data: raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node) # now we rename the new LVs to the old LVs info("renaming the new LVs on the target node") rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)] - if not self.rpc.call_blockdev_rename(tgt_node, rlist): + result = self.rpc.call_blockdev_rename(tgt_node, rlist) + result.Raise() + if not result.data: raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node) for old, new in zip(old_lvs, 
new_lvs): @@ -4185,9 +4895,11 @@ class LUReplaceDisks(LogicalUnit): # now that the new lvs have the old name, we can add them to the device info("adding new mirror component on %s" % tgt_node) - if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs): + result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs) + if result.failed or not result.data: for new_lv in new_lvs: - if not self.rpc.call_blockdev_remove(tgt_node, new_lv): + result = self.rpc.call_blockdev_remove(tgt_node, new_lv) + if result.failed or not result.data: warning("Can't rollback device %s", hint="manually cleanup unused" " logical volumes") raise errors.OpExecError("Can't add local storage to drbd") @@ -4206,8 +4918,8 @@ class LUReplaceDisks(LogicalUnit): # so check manually all the devices for name, (dev, old_lvs, new_lvs) in iv_names.iteritems(): cfg.SetDiskID(dev, instance.primary_node) - is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5] - if is_degr: + result = self.rpc.call_blockdev_find(instance.primary_node, dev) + if result.failed or result.data[5]: raise errors.OpExecError("DRBD device %s is degraded!" % name) # Step: remove old storage @@ -4216,7 +4928,8 @@ class LUReplaceDisks(LogicalUnit): info("remove logical volumes for %s" % name) for lv in old_lvs: cfg.SetDiskID(lv, tgt_node) - if not self.rpc.call_blockdev_remove(tgt_node, lv): + result = self.rpc.call_blockdev_remove(tgt_node, lv) + if result.failed or not result.data: warning("Can't remove old LV", hint="manually remove unused LVs") continue @@ -4243,23 +4956,25 @@ class LUReplaceDisks(LogicalUnit): warning, info = (self.proc.LogWarning, self.proc.LogInfo) instance = self.instance iv_names = {} - vgname = self.cfg.GetVGName() # start of work cfg = self.cfg old_node = self.tgt_node new_node = self.new_node pri_node = instance.primary_node + nodes_ip = { + old_node: self.cfg.GetNodeInfo(old_node).secondary_ip, + new_node: self.cfg.GetNodeInfo(new_node).secondary_ip, + pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip, + } # Step: check device activation self.proc.LogStep(1, steps_total, "check device existence") info("checking volume groups") my_vg = cfg.GetVGName() results = self.rpc.call_vg_list([pri_node, new_node]) - if not results: - raise errors.OpExecError("Can't list volume groups on the nodes") for node in pri_node, new_node: - res = results.get(node, False) - if not res or my_vg not in res: + res = results[node] + if res.failed or not res.data or my_vg not in res.data: raise errors.OpExecError("Volume group '%s' not found on %s" % (my_vg, node)) for idx, dev in enumerate(instance.disks): @@ -4267,7 +4982,9 @@ class LUReplaceDisks(LogicalUnit): continue info("checking disk/%d on %s" % (idx, pri_node)) cfg.SetDiskID(dev, pri_node) - if not self.rpc.call_blockdev_find(pri_node, dev): + result = self.rpc.call_blockdev_find(pri_node, dev) + result.Raise() + if not result.data: raise errors.OpExecError("Can't find disk/%d on node %s" % (idx, pri_node)) @@ -4285,18 +5002,12 @@ class LUReplaceDisks(LogicalUnit): # Step: create new storage self.proc.LogStep(3, steps_total, "allocate new storage") for idx, dev in enumerate(instance.disks): - size = dev.size info("adding new local storage on %s for disk/%d" % (new_node, idx)) - # since we *always* want to create this LV, we use the - # _Create...OnPrimary (which forces the creation), even if we - # are talking about the secondary node + # we pass force_create=True to force LVM creation for new_lv in dev.children: - if not _CreateBlockDevOnPrimary(self, new_node, 
instance, new_lv,
-                                        _GetInstanceInfoText(instance)):
-          raise errors.OpExecError("Failed to create new LV named '%s' on"
-                                   " node '%s'" %
-                                   (new_lv.logical_id[1], new_node))
+        _CreateBlockDev(self, new_node, instance, new_lv, True,
+                        _GetInstanceInfoText(instance), False)
 
     # Step 4: dbrd minors and drbd setups changes
     # after this, we must manually remove the drbd minors on both the
@@ -4308,55 +5019,51 @@ class LUReplaceDisks(LogicalUnit):
     for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
       size = dev.size
       info("activating a new drbd on %s for disk/%d" % (new_node, idx))
-      # create new devices on new_node
-      if pri_node == dev.logical_id[0]:
-        new_logical_id = (pri_node, new_node,
-                          dev.logical_id[2], dev.logical_id[3], new_minor,
-                          dev.logical_id[5])
+      # create new devices on new_node; note that we create two IDs:
+      # one without port, so the drbd will be activated without
+      # networking information on the new node at this stage, and one
+      # with network, for the latter activation in step 4
+      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
+      if pri_node == o_node1:
+        p_minor = o_minor1
       else:
-        new_logical_id = (new_node, pri_node,
-                          dev.logical_id[2], new_minor, dev.logical_id[4],
-                          dev.logical_id[5])
-      iv_names[idx] = (dev, dev.children, new_logical_id)
+        p_minor = o_minor2
+
+      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
+      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
+
+      iv_names[idx] = (dev, dev.children, new_net_id)
       logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
-                    new_logical_id)
+                    new_net_id)
       new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
-                              logical_id=new_logical_id,
+                              logical_id=new_alone_id,
                               children=dev.children)
-      if not _CreateBlockDevOnSecondary(self, new_node, instance,
-                                        new_drbd, False,
-                                        _GetInstanceInfoText(instance)):
+      try:
+        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
+                              _GetInstanceInfoText(instance), False)
+      except errors.BlockDeviceError:
         self.cfg.ReleaseDRBDMinors(instance.name)
-        raise errors.OpExecError("Failed to create new DRBD on"
-                                 " node '%s'" % new_node)
+        raise
 
     for idx, dev in enumerate(instance.disks):
       # we have new devices, shutdown the drbd on the old secondary
       info("shutting down drbd for disk/%d on old node" % idx)
       cfg.SetDiskID(dev, old_node)
-      if not self.rpc.call_blockdev_shutdown(old_node, dev):
+      result = self.rpc.call_blockdev_shutdown(old_node, dev)
+      if result.failed or not result.data:
         warning("Failed to shutdown drbd for disk/%d on old node" % idx,
                 hint="Please cleanup this device manually as soon as possible")
 
     info("detaching primary drbds from the network (=> standalone)")
-    done = 0
-    for idx, dev in enumerate(instance.disks):
-      cfg.SetDiskID(dev, pri_node)
-      # set the network part of the physical (unique in bdev terms) id
-      # to None, meaning detach from network
-      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
-      # and 'find' the device, which will 'fix' it to match the
-      # standalone state
-      if self.rpc.call_blockdev_find(pri_node, dev):
-        done += 1
-      else:
-        warning("Failed to detach drbd disk/%d from network, unusual case" %
-                idx)
+    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
+                                               instance.disks)[pri_node]
 
-    if not done:
-      # no detaches succeeded (very unlikely)
+    msg = result.RemoteFailMsg()
+    if msg:
+      # detaches didn't succeed (unlikely)
       self.cfg.ReleaseDRBDMinors(instance.name)
-      raise errors.OpExecError("Can't detach at least one DRBD from old node")
+      raise
errors.OpExecError("Can't detach the disks from the network on" + " old node: %s" % (msg,)) # if we managed to detach at least one, we update all the disks of # the instance to point to the new secondary @@ -4371,17 +5078,15 @@ class LUReplaceDisks(LogicalUnit): # and now perform the drbd attach info("attaching primary drbds to new secondary (standalone => connected)") - failures = [] - for idx, dev in enumerate(instance.disks): - info("attaching primary drbd for disk/%d to new secondary node" % idx) - # since the attach is smart, it's enough to 'find' the device, - # it will automatically activate the network, if the physical_id - # is correct - cfg.SetDiskID(dev, pri_node) - logging.debug("Disk to attach: %s", dev) - if not self.rpc.call_blockdev_find(pri_node, dev): - warning("can't attach drbd disk/%d to new secondary!" % idx, - "please do a gnt-instance info to see the status of disks") + result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip, + instance.disks, instance.name, + False) + for to_node, to_result in result.items(): + msg = to_result.RemoteFailMsg() + if msg: + warning("can't attach drbd disks on node %s: %s", to_node, msg, + hint="please do a gnt-instance info to see the" + " status of disks") # this can fail as the old devices are degraded and _WaitForSync # does a combined result over all disks, so we don't check its @@ -4392,8 +5097,9 @@ class LUReplaceDisks(LogicalUnit): # so check manually all the devices for idx, (dev, old_lvs, _) in iv_names.iteritems(): cfg.SetDiskID(dev, pri_node) - is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5] - if is_degr: + result = self.rpc.call_blockdev_find(pri_node, dev) + result.Raise() + if result.data[5]: raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx) self.proc.LogStep(6, steps_total, "removing old storage") @@ -4401,7 +5107,8 @@ class LUReplaceDisks(LogicalUnit): info("remove logical volumes for disk/%d" % idx) for lv in old_lvs: cfg.SetDiskID(lv, old_node) - if not self.rpc.call_blockdev_remove(old_node, lv): + result = self.rpc.call_blockdev_remove(old_node, lv) + if result.failed or not result.data: warning("Can't remove LV on old secondary", hint="Cleanup stale volumes by hand") @@ -4417,13 +5124,10 @@ class LUReplaceDisks(LogicalUnit): if instance.status == "down": _StartInstanceDisks(self, instance, True) - if instance.disk_template == constants.DT_DRBD8: - if self.op.remote_node is None: - fn = self._ExecD8DiskOnly - else: - fn = self._ExecD8Secondary + if self.op.mode == constants.REPLACE_DISK_CHG: + fn = self._ExecD8Secondary else: - raise errors.ProgrammerError("Unhandled disk replacement case") + fn = self._ExecD8DiskOnly ret = fn(feedback_fn) @@ -4478,6 +5182,10 @@ class LUGrowDisk(LogicalUnit): instance = self.cfg.GetInstanceInfo(self.op.instance_name) assert instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + nodenames = list(instance.all_nodes) + for node in nodenames: + _CheckNodeOnline(self, node) + self.instance = instance @@ -4487,22 +5195,21 @@ class LUGrowDisk(LogicalUnit): self.disk = instance.FindDisk(self.op.disk) - nodenames = [instance.primary_node] + list(instance.secondary_nodes) nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(), instance.hypervisor) for node in nodenames: - info = nodeinfo.get(node, None) - if not info: + info = nodeinfo[node] + if info.failed or not info.data: raise errors.OpPrereqError("Cannot get current information" " from node '%s'" % node) - vg_free = info.get('vg_free', None) + vg_free = 
info.data.get('vg_free', None) if not isinstance(vg_free, int): raise errors.OpPrereqError("Can't compute free disk space on" " node %s" % node) - if self.op.amount > info['vg_free']: + if self.op.amount > vg_free: raise errors.OpPrereqError("Not enough disk space on target node %s:" " %d MiB available, %d MiB required" % - (node, info['vg_free'], self.op.amount)) + (node, vg_free, self.op.amount)) def Exec(self, feedback_fn): """Execute disk grow. @@ -4510,15 +5217,16 @@ class LUGrowDisk(LogicalUnit): """ instance = self.instance disk = self.disk - for node in (instance.secondary_nodes + (instance.primary_node,)): + for node in instance.all_nodes: self.cfg.SetDiskID(disk, node) result = self.rpc.call_blockdev_grow(node, disk, self.op.amount) - if (not result or not isinstance(result, (list, tuple)) or - len(result) != 2): - raise errors.OpExecError("grow request failed to node %s" % node) - elif not result[0]: - raise errors.OpExecError("grow request failed to node %s: %s" % - (node, result[1])) + result.Raise() + if (not result.data or not isinstance(result.data, (list, tuple)) or + len(result.data) != 2): + raise errors.OpExecError("Grow request failed to node %s" % node) + elif not result.data[0]: + raise errors.OpExecError("Grow request failed to node %s: %s" % + (node, result.data[1])) disk.RecordGrow(self.op.amount) self.cfg.Update(instance) if self.op.wait_for_sync: @@ -4547,8 +5255,7 @@ class LUQueryInstanceData(NoHooksLU): for name in self.op.instances: full_name = self.cfg.ExpandInstanceName(name) if full_name is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name) + raise errors.OpPrereqError("Instance '%s' not known" % name) self.wanted_names.append(full_name) self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names else: @@ -4583,6 +5290,8 @@ class LUQueryInstanceData(NoHooksLU): if not static: self.cfg.SetDiskID(dev, instance.primary_node) dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev) + dev_pstatus.Raise() + dev_pstatus = dev_pstatus.data else: dev_pstatus = None @@ -4596,6 +5305,8 @@ class LUQueryInstanceData(NoHooksLU): if snode and not static: self.cfg.SetDiskID(dev, snode) dev_sstatus = self.rpc.call_blockdev_find(snode, dev) + dev_sstatus.Raise() + dev_sstatus = dev_sstatus.data else: dev_sstatus = None @@ -4629,6 +5340,8 @@ class LUQueryInstanceData(NoHooksLU): remote_info = self.rpc.call_instance_info(instance.primary_node, instance.name, instance.hypervisor) + remote_info.Raise() + remote_info = remote_info.data if remote_info and "state" in remote_info: remote_state = "up" else: @@ -4688,14 +5401,8 @@ class LUSetInstanceParams(LogicalUnit): self.op.hvparams or self.op.beparams): raise errors.OpPrereqError("No changes submitted") - for item in (constants.BE_MEMORY, constants.BE_VCPUS): - val = self.op.beparams.get(item, None) - if val is not None: - try: - val = int(val) - except ValueError, err: - raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err))) - self.op.beparams[item] = val + utils.CheckBEParams(self.op.beparams) + # Disk validation disk_addremove = 0 for disk_op, disk_dict in self.op.disks: @@ -4789,8 +5496,7 @@ class LUSetInstanceParams(LogicalUnit): args['vcpus'] = self.be_new[constants.BE_VCPUS] # FIXME: readd disk/nic changes env = _BuildInstanceHookEnvByObject(self, self.instance, override=args) - nl = [self.cfg.GetMasterNode(), - self.instance.primary_node] + list(self.instance.secondary_nodes) + nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, 
nl def CheckPrereq(self): @@ -4806,19 +5512,20 @@ class LUSetInstanceParams(LogicalUnit): instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) assert self.instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name - pnode = self.instance.primary_node - nodelist = [pnode] - nodelist.extend(instance.secondary_nodes) + pnode = instance.primary_node + nodelist = list(instance.all_nodes) # hvparams processing if self.op.hvparams: i_hvdict = copy.deepcopy(instance.hvparams) for key, val in self.op.hvparams.iteritems(): - if val is None: + if val == constants.VALUE_DEFAULT: try: del i_hvdict[key] except KeyError: pass + elif val == constants.VALUE_NONE: + i_hvdict[key] = None else: i_hvdict[key] = val cluster = self.cfg.GetClusterInfo() @@ -4837,7 +5544,7 @@ class LUSetInstanceParams(LogicalUnit): if self.op.beparams: i_bedict = copy.deepcopy(instance.beparams) for key, val in self.op.beparams.iteritems(): - if val is None: + if val == constants.VALUE_DEFAULT: try: del i_bedict[key] except KeyError: @@ -4863,30 +5570,31 @@ class LUSetInstanceParams(LogicalUnit): instance.hypervisor) nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(), instance.hypervisor) - - if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict): + if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict): # Assume the primary node is unreachable and go ahead self.warn.append("Can't get info from primary node %s" % pnode) else: - if instance_info: - current_mem = instance_info['memory'] + if not instance_info.failed and instance_info.data: + current_mem = instance_info.data['memory'] else: # Assume instance not running # (there is a slight race condition here, but it's not very probable, # and we have no other way to check) current_mem = 0 miss_mem = (be_new[constants.BE_MEMORY] - current_mem - - nodeinfo[pnode]['memory_free']) + nodeinfo[pnode].data['memory_free']) if miss_mem > 0: raise errors.OpPrereqError("This change will prevent the instance" " from starting, due to %d MB of memory" " missing on its primary node" % miss_mem) if be_new[constants.BE_AUTO_BALANCE]: - for node in instance.secondary_nodes: - if node not in nodeinfo or not isinstance(nodeinfo[node], dict): + for node, nres in nodeinfo.iteritems(): + if node not in instance.secondary_nodes: + continue + if nres.failed or not isinstance(nres.data, dict): self.warn.append("Can't get info from secondary node %s" % node) - elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']: + elif be_new[constants.BE_MEMORY] > nres.data['memory_free']: self.warn.append("Not enough memory to failover instance to" " secondary node %s" % node) @@ -4923,9 +5631,9 @@ class LUSetInstanceParams(LogicalUnit): " an instance") ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor]) ins_l = ins_l[pnode] - if not type(ins_l) is list: + if ins_l.failed or not isinstance(ins_l.data, list): raise errors.OpPrereqError("Can't contact node '%s'" % pnode) - if instance.name in ins_l: + if instance.name in ins_l.data: raise errors.OpPrereqError("Instance is running, can't remove" " disks.") @@ -4963,7 +5671,8 @@ class LUSetInstanceParams(LogicalUnit): device_idx = len(instance.disks) for node, disk in device.ComputeNodeTree(instance.primary_node): self.cfg.SetDiskID(disk, node) - if not self.rpc.call_blockdev_remove(node, disk): + rpc_result = self.rpc.call_blockdev_remove(node, disk) + if rpc_result.failed or not rpc_result.data: self.proc.LogWarning("Could not remove disk/%d on node 
%s,"
                                " continuing anyway", device_idx, node)
         result.append(("disk/%d" % device_idx, "remove"))
@@ -4991,17 +5700,15 @@ class LUSetInstanceParams(LogicalUnit):
                      new_disk.iv_name, instance.name)
         # Note: this needs to be kept in sync with _CreateDisks
         #HARDCODE
-        for secondary_node in instance.secondary_nodes:
-          if not _CreateBlockDevOnSecondary(self, secondary_node, instance,
-                                            new_disk, False, info):
+        for node in instance.all_nodes:
+          f_create = node == instance.primary_node
+          try:
+            _CreateBlockDev(self, node, instance, new_disk,
+                            f_create, info, f_create)
+          except errors.OpExecError, err:
             self.LogWarning("Failed to create volume %s (%s) on"
-                            " secondary node %s!",
-                            new_disk.iv_name, new_disk, secondary_node)
-        #HARDCODE
-        if not _CreateBlockDevOnPrimary(self, instance.primary_node,
-                                        instance, new_disk, info):
-          self.LogWarning("Failed to create volume %s on primary!",
-                          new_disk.iv_name)
+                            " node %s: %s",
+                            new_disk.iv_name, new_disk, node, err)
         result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                        (new_disk.size, new_disk.mode)))
       else:
@@ -5083,7 +5790,15 @@ class LUQueryExports(NoHooksLU):
     that node.
 
     """
-    return self.rpc.call_export_list(self.nodes)
+    rpcresult = self.rpc.call_export_list(self.nodes)
+    result = {}
+    for node in rpcresult:
+      if rpcresult[node].failed:
+        result[node] = False
+      else:
+        result[node] = rpcresult[node].data
+
+    return result
 
 
 class LUExportInstance(LogicalUnit):
@@ -5136,6 +5851,7 @@ class LUExportInstance(LogicalUnit):
     self.instance = self.cfg.GetInstanceInfo(instance_name)
     assert self.instance is not None, \
           "Cannot retrieve locked instance %s" % self.op.instance_name
+    _CheckNodeOnline(self, self.instance.primary_node)
 
     self.dst_node = self.cfg.GetNodeInfo(
       self.cfg.ExpandNodeName(self.op.target_node))
@@ -5143,6 +5859,7 @@ class LUExportInstance(LogicalUnit):
     if self.dst_node is None:
       # This is wrong node name, not a non-locked node
       raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
+    _CheckNodeOnline(self, self.dst_node.name)
 
     # instance disk type verification
     for disk in self.instance.disks:
@@ -5159,7 +5876,9 @@ class LUExportInstance(LogicalUnit):
     src_node = instance.primary_node
     if self.op.shutdown:
       # shutdown the instance, but not the disks
-      if not self.rpc.call_instance_shutdown(src_node, instance):
+      result = self.rpc.call_instance_shutdown(src_node, instance)
+      result.Raise()
+      if not result.data:
         raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                  (instance.name, src_node))
 
@@ -5167,43 +5886,52 @@ class LUExportInstance(LogicalUnit):
 
     snap_disks = []
 
+    # set the disks ID correctly since call_instance_start needs the
+    # correct drbd minor to create the symlinks
+    for disk in instance.disks:
+      self.cfg.SetDiskID(disk, src_node)
+
     try:
       for disk in instance.disks:
         # new_dev_name will be a snapshot of an lvm leaf of the one we passed
         new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
-
-        if not new_dev_name:
+        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
-                                 logical_id=(vgname, new_dev_name),
-                                 physical_id=(vgname, new_dev_name),
+                                 logical_id=(vgname, new_dev_name.data),
+                                 physical_id=(vgname, new_dev_name.data),
                                  iv_name=disk.iv_name)
          snap_disks.append(new_dev)
 
     finally:
       if self.op.shutdown and instance.status == "up":
-        if not self.rpc.call_instance_start(src_node, instance, None):
+        result =
self.rpc.call_instance_start(src_node, instance, None) + msg = result.RemoteFailMsg() + if msg: _ShutdownInstanceDisks(self, instance) - raise errors.OpExecError("Could not start instance") + raise errors.OpExecError("Could not start instance: %s" % msg) # TODO: check for size cluster_name = self.cfg.GetClusterName() for idx, dev in enumerate(snap_disks): if dev: - if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name, - instance, cluster_name, idx): + result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name, + instance, cluster_name, idx) + if result.failed or not result.data: self.LogWarning("Could not export block device %s from node %s to" " node %s", dev.logical_id[1], src_node, dst_node.name) - if not self.rpc.call_blockdev_remove(src_node, dev): + result = self.rpc.call_blockdev_remove(src_node, dev) + if result.failed or not result.data: self.LogWarning("Could not remove snapshot block device %s from node" " %s", dev.logical_id[1], src_node) - if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks): + result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks) + if result.failed or not result.data: self.LogWarning("Could not finalize export for instance %s on node %s", instance.name, dst_node.name) @@ -5216,7 +5944,9 @@ class LUExportInstance(LogicalUnit): if nodelist: exportlist = self.rpc.call_export_list(nodelist) for node in exportlist: - if instance.name in exportlist[node]: + if exportlist[node].failed: + continue + if instance.name in exportlist[node].data: if not self.rpc.call_export_remove(node, instance.name): self.LogWarning("Could not remove older export for instance %s" " on node %s", instance.name, node) @@ -5257,9 +5987,13 @@ class LURemoveExport(NoHooksLU): locking.LEVEL_NODE]) found = False for node in exportlist: - if instance_name in exportlist[node]: + if exportlist[node].failed: + self.LogWarning("Failed to query node %s, continuing" % node) + continue + if instance_name in exportlist[node].data: found = True - if not self.rpc.call_export_remove(node, instance_name): + result = self.rpc.call_export_remove(node, instance_name) + if result.failed or not result.data: logging.error("Could not remove export for instance %s" " on node %s", instance_name, node) @@ -5476,9 +6210,10 @@ class LUTestDelay(NoHooksLU): if not result: raise errors.OpExecError("Complete failure from rpc call") for node, node_result in result.items(): - if not node_result: + node_result.Raise() + if not node_result.data: raise errors.OpExecError("Failure during rpc call to node %s," - " result: %s" % (node, node_result)) + " result: %s" % (node, node_result.data)) class IAllocator(object): @@ -5511,6 +6246,7 @@ class IAllocator(object): self.name = name self.mem_size = self.disks = self.disk_template = None self.os = self.tags = self.nics = self.vcpus = None + self.hypervisor = None self.relocate_from = None # computed fields self.required_nodes = None @@ -5558,19 +6294,20 @@ class IAllocator(object): node_list = cfg.GetNodeList() if self.mode == constants.IALLOCATOR_MODE_ALLOC: - hypervisor = self.hypervisor + hypervisor_name = self.hypervisor elif self.mode == constants.IALLOCATOR_MODE_RELOC: - hypervisor = cfg.GetInstanceInfo(self.name).hypervisor + hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(), - hypervisor) + hypervisor_name) node_iinfo = self.lu.rpc.call_all_instances_info(node_list, cluster_info.enabled_hypervisors) for nname in node_list: ninfo = 
cfg.GetNodeInfo(nname) - if nname not in node_data or not isinstance(node_data[nname], dict): + node_data[nname].Raise() + if not isinstance(node_data[nname].data, dict): raise errors.OpExecError("Can't get data for node %s" % nname) - remote_info = node_data[nname] + remote_info = node_data[nname].data for attr in ['memory_total', 'memory_free', 'memory_dom0', 'vg_size', 'vg_free', 'cpu_total']: if attr not in remote_info: @@ -5609,6 +6346,7 @@ class IAllocator(object): "primary_ip": ninfo.primary_ip, "secondary_ip": ninfo.secondary_ip, "total_cpus": remote_info['cpu_total'], + "offline": ninfo.offline, } node_results[nname] = pnr data["nodes"] = node_results @@ -5624,7 +6362,7 @@ class IAllocator(object): "vcpus": beinfo[constants.BE_VCPUS], "memory": beinfo[constants.BE_MEMORY], "os": iinfo.os, - "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes), + "nodes": list(iinfo.all_nodes), "nics": nic_data, "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks], "disk_template": iinfo.disk_template, @@ -5727,11 +6465,12 @@ class IAllocator(object): data = self.in_text result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text) + result.Raise() - if not isinstance(result, (list, tuple)) or len(result) != 4: + if not isinstance(result.data, (list, tuple)) or len(result.data) != 4: raise errors.OpExecError("Invalid result from master iallocator runner") - rcode, stdout, stderr, fail = result + rcode, stdout, stderr, fail = result.data if rcode == constants.IARUN_NOTFOUND: raise errors.OpExecError("Can't find allocator '%s'" % name)
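# --- Illustrative sketch, not part of the patch above ---
# The change consistently replaces raw RPC return values with result objects
# that callers must check explicitly. MiniResult is a hypothetical stand-in
# that mimics only the accessors exercised in this diff (failed, data, Raise,
# RemoteFailMsg) so the three checking idioms can be shown in isolation; the
# real result objects come from ganeti's rpc layer.

class MiniResult(object):
  """Hypothetical stand-in for an RPC result wrapper (illustration only)."""

  def __init__(self, failed, data):
    self.failed = failed    # True on an RPC/transport-level failure
    self.data = data        # payload returned by the remote node

  def Raise(self):
    """Idiom 1: hard failure -- abort if the RPC itself failed."""
    if self.failed:
      raise RuntimeError("RPC call failed")

  def RemoteFailMsg(self):
    """Idiom 3: new-style (status, message) payloads."""
    if self.failed:
      return "RPC call failed"
    status, msg = self.data
    return None if status else msg


res = MiniResult(False, (True, ""))
res.Raise()                         # idiom 1: raise on transport failure
if res.failed or not res.data:      # idiom 2: legacy boolean-style check
  print "remote operation failed, continuing anyway"
assert res.RemoteFailMsg() is None  # idiom 3: non-empty message means failure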
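# --- Worked example, not part of the patch above ---
# _ExecD8Secondary derives two DRBD8 logical IDs per disk: one without a port
# for the standalone activation on the new node, and one with the port for
# the later reconnect. The values below (node names, port, minors, secret)
# are made up purely for illustration.

pri_node, new_node = "node1", "node3"
new_minor = 2
# existing disk ID: (nodeA, nodeB, port, minorA, minorB, secret)
old_logical_id = ("node1", "node2", 11000, 0, 1, "s3cr3t")
(o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = old_logical_id

# keep the minor that belongs to the primary node
if pri_node == o_node1:
  p_minor = o_minor1
else:
  p_minor = o_minor2

# no port yet: the new DRBD is first brought up standalone on the new node
new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
# full ID, used when the primary is reconnected to the new secondary
new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

assert new_alone_id == ("node1", "node3", None, 0, 2, "s3cr3t")
assert new_net_id == ("node1", "node3", 11000, 0, 2, "s3cr3t")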