X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/32388e6d2cc05a9b93b28c36f002462bba98c5c4..22f0f71df15a8c9bd8b0697cf26c16eece398b64:/lib/cmdlib.py

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index dc092a5..2434a5b 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -392,8 +392,8 @@ def _GetWantedInstances(lu, instances):
       wanted.append(instance)

   else:
-    wanted = lu.cfg.GetInstanceList()
-  return utils.NiceSort(wanted)
+    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
+  return wanted


 def _CheckOutputFields(static, dynamic, selected):
@@ -455,8 +455,8 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @param secondary_nodes: list of secondary nodes as strings
   @type os_type: string
   @param os_type: the name of the instance's OS
-  @type status: string
-  @param status: the desired status of the instances
+  @type status: boolean
+  @param status: the should_run status of the instance
   @type memory: string
   @param memory: the memory size of the instance
   @type vcpus: string
@@ -468,13 +468,17 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @return: the hook environment for this instance

   """
+  if status:
+    str_status = "up"
+  else:
+    str_status = "down"
   env = {
     "OP_TARGET": name,
     "INSTANCE_NAME": name,
     "INSTANCE_PRIMARY": primary_node,
     "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
     "INSTANCE_OS_TYPE": os_type,
-    "INSTANCE_STATUS": status,
+    "INSTANCE_STATUS": str_status,
     "INSTANCE_MEMORY": memory,
     "INSTANCE_VCPUS": vcpus,
   }
@@ -516,7 +520,7 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     'primary_node': instance.primary_node,
     'secondary_nodes': instance.secondary_nodes,
     'os_type': instance.os,
-    'status': instance.os,
+    'status': instance.admin_up,
     'memory': bep[constants.BE_MEMORY],
     'vcpus': bep[constants.BE_VCPUS],
     'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
@@ -646,18 +650,28 @@ class LUVerifyCluster(LogicalUnit):
     # compares ganeti version
     local_version = constants.PROTOCOL_VERSION
     remote_version = node_result.get('version', None)
-    if not remote_version:
+    if not (remote_version and isinstance(remote_version, (list, tuple)) and
+            len(remote_version) == 2):
       feedback_fn("  - ERROR: connection to %s failed" % (node))
       return True

-    if local_version != remote_version:
-      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
-                  (local_version, node, remote_version))
+    if local_version != remote_version[0]:
+      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
+                  " node %s %s" % (local_version, node, remote_version[0]))
       return True

-    # checks vg existance and size > 20G
+    # node seems compatible, we can actually try to look into its results

     bad = False
+
+    # full package version
+    if constants.RELEASE_VERSION != remote_version[1]:
+      feedback_fn("  - WARNING: software version mismatch: master %s,"
+                  " node %s %s" %
+                  (constants.RELEASE_VERSION, node, remote_version[1]))
+
+    # checks vg existence and size > 20G
+
     vglist = node_result.get(constants.NV_VGLIST, None)
     if not vglist:
       feedback_fn("  - ERROR: unable to check volume groups on node %s." %
@@ -768,7 +782,7 @@ class LUVerifyCluster(LogicalUnit):
                     (volume, node))
         bad = True

-    if not instanceconfig.status == 'down':
+    if instanceconfig.admin_up:
       if ((node_current not in node_instance or
            not instance in node_instance[node_current]) and
            node_current not in n_offline):
@@ -889,6 +903,7 @@ class LUVerifyCluster(LogicalUnit):
     i_non_redundant = [] # Non redundant instances
     i_non_a_balanced = [] # Non auto-balanced instances
     n_offline = [] # List of offline nodes
+    n_drained = [] # List of nodes being drained
     node_volume = {}
     node_instance = {}
     node_info = {}
@@ -941,6 +956,9 @@ class LUVerifyCluster(LogicalUnit):
         ntype = "master"
       elif node_i.master_candidate:
         ntype = "master candidate"
+      elif node_i.drained:
+        ntype = "drained"
+        n_drained.append(node)
       else:
         ntype = "regular"
       feedback_fn("* Verifying node %s (%s)" % (node, ntype))
@@ -953,7 +971,7 @@ class LUVerifyCluster(LogicalUnit):
       node_drbd = {}
       for minor, instance in all_drbd_map[node].items():
         instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name, instance.status == "up")
+        node_drbd[minor] = (instance.name, instance.admin_up)
       result = self._VerifyNode(node_i, file_names, local_checksums,
                                 nresult, feedback_fn, master_files,
                                 node_drbd)
@@ -962,7 +980,7 @@ class LUVerifyCluster(LogicalUnit):
       lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
       if isinstance(lvdata, basestring):
         feedback_fn("  - ERROR: LVM problem on node %s: %s" %
-                    (node, lvdata.encode('string_escape')))
+                    (node, utils.SafeEncode(lvdata)))
         bad = True
         node_volume[node] = {}
       elif not isinstance(lvdata, dict):
@@ -1093,6 +1111,9 @@ class LUVerifyCluster(LogicalUnit):
     if n_offline:
       feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

+    if n_drained:
+      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
+
     return not bad

   def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
@@ -1181,7 +1202,7 @@ class LUVerifyDisks(NoHooksLU):
     nv_dict = {}
     for inst in instances:
       inst_lvs = {}
-      if (inst.status != "up" or
+      if (not inst.admin_up or
           inst.disk_template not in constants.DTS_NET_MIRROR):
         continue
       inst.MapLVsByNode(inst_lvs)
@@ -1566,11 +1587,15 @@ def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
   result = True
   if on_primary or dev.AssembleOnSecondary():
     rstats = lu.rpc.call_blockdev_find(node, dev)
-    if rstats.failed or not rstats.data:
-      logging.warning("Node %s: disk degraded, not found or node down", node)
+    msg = rstats.RemoteFailMsg()
+    if msg:
+      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
+      result = False
+    elif not rstats.payload:
+      lu.LogWarning("Can't find disk on node %s", node)
       result = False
     else:
-      result = result and (not rstats.data[idx])
+      result = result and (not rstats.payload[idx])
   if dev.children:
     for child in dev.children:
       result = result and _CheckDiskConsistency(lu, child, node, on_primary)
@@ -1739,13 +1764,13 @@ class LUQueryNodes(NoHooksLU):
   """Logical unit for querying nodes.

   """
-  _OP_REQP = ["output_fields", "names"]
+  _OP_REQP = ["output_fields", "names", "use_locking"]
   REQ_BGL = False
   _FIELDS_DYNAMIC = utils.FieldSet(
     "dtotal", "dfree",
     "mtotal", "mnode", "mfree",
     "bootid",
-    "ctotal",
+    "ctotal", "cnodes", "csockets",
     )
   _FIELDS_STATIC = utils.FieldSet(
@@ -1756,6 +1781,7 @@ class LUQueryNodes(NoHooksLU):
     "master_candidate",
     "master",
     "offline",
+    "drained",
     )

   def ExpandNames(self):
@@ -1771,7 +1797,8 @@ class LUQueryNodes(NoHooksLU):
     else:
       self.wanted = locking.ALL_SET

-    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_locking = self.do_node_query and self.op.use_locking
     if self.do_locking:
       # if we don't request only static fields, we need to lock the nodes
       self.needed_locks[locking.LEVEL_NODE] = self.wanted
@@ -1806,7 +1833,7 @@ class LUQueryNodes(NoHooksLU):

     # begin data gathering

-    if self.do_locking:
+    if self.do_node_query:
       live_data = {}
       node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                           self.cfg.GetHypervisorType())
@@ -1823,6 +1850,8 @@ class LUQueryNodes(NoHooksLU):
           "dfree": fn(int, nodeinfo.get('vg_free', None)),
           "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
           "bootid": nodeinfo.get('bootid', None),
+          "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
+          "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
           }
       else:
         live_data[name] = {}
@@ -1877,6 +1906,8 @@ class LUQueryNodes(NoHooksLU):
         val = node.name == master_node
       elif field == "offline":
         val = node.offline
+      elif field == "drained":
+        val = node.drained
       elif self._FIELDS_DYNAMIC.Matches(field):
         val = live_data[node.name].get(field, None)
       else:
@@ -2073,7 +2104,7 @@ class LUAddNode(LogicalUnit):
                                  primary_ip=primary_ip,
                                  secondary_ip=secondary_ip,
                                  master_candidate=master_candidate,
-                                 offline=False)
+                                 offline=False, drained=False)

   def Exec(self, feedback_fn):
     """Adds the new node to the cluster.
@@ -2115,8 +2146,10 @@ class LUAddNode(LogicalUnit):
                                     keyarray[2], keyarray[3], keyarray[4],
                                     keyarray[5])

-    if result.failed or not result.data:
-      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
+    msg = result.RemoteFailMsg()
+    if msg:
+      raise errors.OpExecError("Cannot transfer ssh keys to the"
+                               " new node: %s" % msg)

     # Add node to our /etc/hosts, and add key to known_hosts
     utils.AddHostToEtcHosts(new_node.name)
@@ -2144,7 +2177,7 @@ class LUAddNode(LogicalUnit):
       if result[verifier].data['nodelist']:
         for failed in result[verifier].data['nodelist']:
           feedback_fn("ssh/hostname verification failed %s -> %s" %
-                      (verifier, result[verifier]['nodelist'][failed]))
+                      (verifier, result[verifier].data['nodelist'][failed]))
         raise errors.OpExecError("ssh/hostname verification failed.")

     # Distribute updated /etc/hosts and known_hosts to all nodes,
@@ -2164,8 +2197,10 @@ class LUAddNode(LogicalUnit):
           logging.error("Copy of file %s to node %s failed", fname, to_node)

     to_copy = []
-    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
+    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
+    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)
+
     for fname in to_copy:
       result = self.rpc.call_upload_file([node], fname)
       if result[node].failed or not result[node]:
@@ -2268,11 +2303,9 @@ class LUSetNodeParams(LogicalUnit):
       result.append(("master_candidate", str(self.op.master_candidate)))
       if self.op.master_candidate == False:
         rrc = self.rpc.call_node_demote_from_mc(node.name)
-        if (rrc.failed or not isinstance(rrc.data, (tuple, list))
-            or len(rrc.data) != 2):
-          self.LogWarning("Node rpc error: %s" % rrc.error)
-        elif not rrc.data[0]:
-          self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])
+        msg = rrc.RemoteFailMsg()
+        if msg:
+          self.LogWarning("Node failed to demote itself: %s" % msg)

     # this will trigger configuration file update, if needed
     self.cfg.Update(node)
@@ -2315,7 +2348,8 @@ class LUQueryClusterInfo(NoHooksLU):
       "master": cluster.master_node,
       "default_hypervisor": cluster.default_hypervisor,
       "enabled_hypervisors": cluster.enabled_hypervisors,
-      "hvparams": cluster.hvparams,
+      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
+                        for hypervisor in cluster.enabled_hypervisors]),
       "beparams": cluster.beparams,
       "candidate_pool_size": cluster.candidate_pool_size,
       }
@@ -2435,10 +2469,11 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
-      if result.failed or not result:
+      msg = result.RemoteFailMsg()
+      if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
-                           " (is_primary=False, pass=1)",
-                           inst_disk.iv_name, node)
+                           " (is_primary=False, pass=1): %s",
+                           inst_disk.iv_name, node, msg)
         if not ignore_secondaries:
           disks_ok = False
@@ -2451,10 +2486,11 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
         continue
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
-      if result.failed or not result:
+      msg = result.RemoteFailMsg()
+      if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
-                           " (is_primary=True, pass=2)",
-                           inst_disk.iv_name, node)
+                           " (is_primary=True, pass=2): %s",
+                           inst_disk.iv_name, node, msg)
         disks_ok = False
     device_info.append((instance.primary_node, inst_disk.iv_name,
                         result.data))
@@ -2546,17 +2582,18 @@ def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
   ignored.

   """
-  result = True
+  all_result = True
   for disk in instance.disks:
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(top_disk, node)
       result = lu.rpc.call_blockdev_shutdown(node, top_disk)
-      if result.failed or not result.data:
-        logging.error("Could not shutdown block device %s on node %s",
-                      disk.iv_name, node)
+      msg = result.RemoteFailMsg()
+      if msg:
+        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
+                      disk.iv_name, node, msg)
         if not ignore_primary or node != instance.primary_node:
-          result = False
-  return result
+          all_result = False
+  return all_result


 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
@@ -2821,7 +2858,7 @@ class LUReinstallInstance(LogicalUnit):
     if instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Instance '%s' has no disks" %
                                  self.op.instance_name)
-    if instance.status != "down":
+    if instance.admin_up:
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
     remote_info = self.rpc.call_instance_info(instance.primary_node,
@@ -2904,7 +2941,7 @@ class LURenameInstance(LogicalUnit):
                                  self.op.instance_name)
     _CheckNodeOnline(self, instance.primary_node)

-    if instance.status != "down":
+    if instance.admin_up:
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
     remote_info = self.rpc.call_instance_info(instance.primary_node,
@@ -2973,10 +3010,11 @@ class LURenameInstance(LogicalUnit):
     try:
       result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                  old_name)
-      if result.failed or not result.data:
+      msg = result.RemoteFailMsg()
+      if msg:
         msg = ("Could not run OS rename script for instance %s on node %s"
-               " (but the instance has been renamed in Ganeti)" %
-               (inst.name, inst.primary_node))
+               " (but the instance has been renamed in Ganeti): %s" %
+               (inst.name, inst.primary_node, msg))
         self.proc.LogWarning(msg)
     finally:
       _ShutdownInstanceDisks(self, inst)
@@ -3054,7 +3092,7 @@ class LUQueryInstances(NoHooksLU):
   """Logical unit for querying instances.

   """
-  _OP_REQP = ["output_fields", "names"]
+  _OP_REQP = ["output_fields", "names", "use_locking"]
   REQ_BGL = False
   _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                     "admin_state", "admin_ram",
@@ -3062,7 +3100,7 @@ class LUQueryInstances(NoHooksLU):
                                     "sda_size", "sdb_size", "vcpus", "tags",
                                     "network_port", "beparams",
                                     "(disk).(size)/([0-9]+)",
-                                    "(disk).(sizes)",
+                                    "(disk).(sizes)", "disk_usage",
                                     "(nic).(mac|ip|bridge)/([0-9]+)",
                                     "(nic).(macs|ips|bridges)",
                                     "(disk|nic).(count)",
@@ -3088,7 +3126,8 @@ class LUQueryInstances(NoHooksLU):
     else:
       self.wanted = locking.ALL_SET

-    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_locking = self.do_node_query and self.op.use_locking
     if self.do_locking:
       self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
       self.needed_locks[locking.LEVEL_NODE] = []
@@ -3109,19 +3148,25 @@ class LUQueryInstances(NoHooksLU):

     """
     all_info = self.cfg.GetAllInstancesInfo()
-    if self.do_locking:
-      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
-    elif self.wanted != locking.ALL_SET:
-      instance_names = self.wanted
-      missing = set(instance_names).difference(all_info.keys())
-      if missing:
-        raise errors.OpExecError(
-          "Some instances were removed before retrieving their data: %s"
-          % missing)
+    if self.wanted == locking.ALL_SET:
+      # caller didn't specify instance names, so ordering is not important
+      if self.do_locking:
+        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+      else:
+        instance_names = all_info.keys()
+      instance_names = utils.NiceSort(instance_names)
     else:
-      instance_names = all_info.keys()
+      # caller did specify names, so we must keep the ordering
+      if self.do_locking:
+        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
+      else:
+        tgt_set = all_info.keys()
+      missing = set(self.wanted).difference(tgt_set)
+      if missing:
+        raise errors.OpExecError("Some instances were removed before"
+                                 " retrieving their data: %s" % missing)
+      instance_names = self.wanted

-    instance_names = utils.NiceSort(instance_names)
     instance_list = [all_info[iname] for iname in instance_names]

     # begin data gathering
@@ -3131,7 +3176,7 @@ class LUQueryInstances(NoHooksLU):
     bad_nodes = []
     off_nodes = []
-    if self.do_locking:
+    if self.do_node_query:
       live_data = {}
       node_data = self.rpc.call_all_instances_info(nodes, hv_list)
       for name in nodes:
@@ -3168,7 +3213,7 @@ class LUQueryInstances(NoHooksLU):
         elif field == "snodes":
           val = list(instance.secondary_nodes)
         elif field == "admin_state":
-          val = (instance.status != "down")
+          val = instance.admin_up
         elif field == "oper_state":
           if instance.primary_node in bad_nodes:
             val = None
@@ -3182,12 +3227,12 @@ class LUQueryInstances(NoHooksLU):
           else:
             running = bool(live_data.get(instance.name))
             if running:
-              if instance.status != "down":
+              if instance.admin_up:
                 val = "running"
               else:
                 val = "ERROR_up"
             else:
-              if instance.status != "down":
+              if instance.admin_up:
                 val = "ERROR_down"
               else:
                 val = "ADMIN_down"
@@ -3212,6 +3257,9 @@ class LUQueryInstances(NoHooksLU):
             val = instance.FindDisk(idx).size
           except errors.OpPrereqError:
             val = None
+        elif field == "disk_usage": # total disk usage per node
+          disk_sizes = [{'size': disk.size} for disk in instance.disks]
+          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
         elif field == "tags":
           val = list(instance.GetTags())
         elif field == "serial_no":
@@ -3361,7 +3409,7 @@ class LUFailoverInstance(LogicalUnit):
     for dev in instance.disks:
       # for drbd, these are drbd over lvm
       if not _CheckDiskConsistency(self, dev, target_node, False):
-        if instance.status == "up" and not self.op.ignore_consistency:
+        if instance.admin_up and not self.op.ignore_consistency:
           raise errors.OpExecError("Disk %s is degraded on target node,"
                                    " aborting failover." % dev.iv_name)
@@ -3389,7 +3437,7 @@ class LUFailoverInstance(LogicalUnit):
     self.cfg.Update(instance)

     # Only start the instance if it's marked as up
-    if instance.status == "up":
+    if instance.admin_up:
       feedback_fn("* activating the instance's disks on target node")
       logging.info("Starting instance %s on node %s",
                    instance.name, target_node)
@@ -3507,7 +3555,7 @@ class LUMigrateInstance(LogicalUnit):
         if msg:
           raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                    (node, msg))
-        node_done, node_percent = nres.data[1]
+        node_done, node_percent = nres.payload
         all_done = all_done and node_done
         if node_percent is not None:
           min_percent = min(min_percent, node_percent)
@@ -3630,6 +3678,41 @@ class LUMigrateInstance(LogicalUnit):

     self.feedback_fn("* done")

+  def _RevertDiskStatus(self):
+    """Try to revert the disk status after a failed migration.
+
+    """
+    target_node = self.target_node
+    try:
+      self._EnsureSecondary(target_node)
+      self._GoStandalone()
+      self._GoReconnect(False)
+      self._WaitUntilSync()
+    except errors.OpExecError, err:
+      self.LogWarning("Migration failed and I can't reconnect the"
+                      " drives: error '%s'\n"
+                      "Please look and recover the instance status" %
+                      str(err))
+
+  def _AbortMigration(self):
+    """Call the hypervisor code to abort a started migration.
+
+    """
+    instance = self.instance
+    target_node = self.target_node
+    migration_info = self.migration_info
+
+    abort_result = self.rpc.call_finalize_migration(target_node,
+                                                    instance,
+                                                    migration_info,
+                                                    False)
+    abort_msg = abort_result.RemoteFailMsg()
+    if abort_msg:
+      logging.error("Aborting migration failed on target node %s: %s" %
+                    (target_node, abort_msg))
+      # Don't raise an exception here, as we stil have to try to revert the
+      # disk status, even if this step failed.
+
   def _ExecMigration(self):
     """Migrate an instance.
@@ -3653,11 +3736,38 @@ class LUMigrateInstance(LogicalUnit):
                                  " synchronized on target node,"
                                  " aborting migrate." % dev.iv_name)

+    # First get the migration information from the remote node
+    result = self.rpc.call_migration_info(source_node, instance)
+    msg = result.RemoteFailMsg()
+    if msg:
+      log_err = ("Failed fetching source migration information from %s: %s" %
+                 (source_node, msg))
+      logging.error(log_err)
+      raise errors.OpExecError(log_err)
+
+    self.migration_info = migration_info = result.payload
+
+    # Then switch the disks to master/master mode
     self._EnsureSecondary(target_node)
     self._GoStandalone()
     self._GoReconnect(True)
     self._WaitUntilSync()

+    self.feedback_fn("* preparing %s to accept the instance" % target_node)
+    result = self.rpc.call_accept_instance(target_node,
+                                           instance,
+                                           migration_info,
+                                           self.nodes_ip[target_node])
+
+    msg = result.RemoteFailMsg()
+    if msg:
+      logging.error("Instance pre-migration failed, trying to revert"
+                    " disk status: %s", msg)
+      self._AbortMigration()
+      self._RevertDiskStatus()
+      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
+                               (instance.name, msg))
+
     self.feedback_fn("* migrating instance to %s" % target_node)
     time.sleep(10)
     result = self.rpc.call_instance_migrate(source_node, instance,
@@ -3667,17 +3777,8 @@ class LUMigrateInstance(LogicalUnit):
     if msg:
       logging.error("Instance migration failed, trying to revert"
                     " disk status: %s", msg)
-      try:
-        self._EnsureSecondary(target_node)
-        self._GoStandalone()
-        self._GoReconnect(False)
-        self._WaitUntilSync()
-      except errors.OpExecError, err:
-        self.LogWarning("Migration failed and I can't reconnect the"
-                        " drives: error '%s'\n"
-                        "Please look and recover the instance status" %
-                        str(err))
-
+      self._AbortMigration()
+      self._RevertDiskStatus()
       raise errors.OpExecError("Could not migrate instance %s: %s" %
                                (instance.name, msg))
     time.sleep(10)
@@ -3686,6 +3787,17 @@ class LUMigrateInstance(LogicalUnit):
     # distribute new instance config to the other nodes
     self.cfg.Update(instance)

+    result = self.rpc.call_finalize_migration(target_node,
+                                              instance,
+                                              migration_info,
+                                              True)
+    msg = result.RemoteFailMsg()
+    if msg:
+      logging.error("Instance migration succeeded, but finalization failed:"
+                    " %s" % msg)
+      raise errors.OpExecError("Could not finalize instance migration: %s" %
+                               msg)
+
     self._EnsureSecondary(source_node)
     self._WaitUntilSync()
     self._GoStandalone()
@@ -3736,7 +3848,7 @@ def _CreateBlockDev(lu, node, instance, device, force_create,
       (this will be represented as a LVM tag)
   @type force_open: boolean
   @param force_open: this parameter will be passes to the
-      L{backend.CreateBlockDevice} function where it specifies
+      L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

@@ -3771,7 +3883,7 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
       (this will be represented as a LVM tag)
   @type force_open: boolean
   @param force_open: this parameter will be passes to the
-      L{backend.CreateBlockDevice} function where it specifies
+      L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
       the child assembly and the device own Open() execution

@@ -3785,7 +3897,7 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
                              " node %s for instance %s: %s" %
                              (device, node, instance.name, msg))
   if device.physical_id is None:
-    device.physical_id = result.data[1]
+    device.physical_id = result.payload


 def _GenerateUniqueNames(lu, exts):
@@ -3847,7 +3959,8 @@ def _GenerateDiskTemplate(lu, template_name,
       disk_index = idx + base_index
       disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                               logical_id=(vgname, names[idx]),
-                              iv_name="disk/%d" % disk_index)
+                              iv_name="disk/%d" % disk_index,
+                              mode=disk["mode"])
       disks.append(disk_dev)
   elif template_name == constants.DT_DRBD8:
     if len(secondary_nodes) != 1:
@@ -3867,6 +3980,7 @@ def _GenerateDiskTemplate(lu, template_name,
                                       disk["size"], names[idx*2:idx*2+2],
                                       "disk/%d" % disk_index,
                                       minors[idx*2], minors[idx*2+1])
+      disk_dev.mode = disk["mode"]
       disks.append(disk_dev)
   elif template_name == constants.DT_FILE:
     if len(secondary_nodes) != 0:
@@ -3878,7 +3992,8 @@ def _GenerateDiskTemplate(lu, template_name,
                               iv_name="disk/%d" % disk_index,
                               logical_id=(file_driver,
                                           "%s/disk%d" % (file_storage_dir,
-                                                         idx)))
+                                                         idx)),
+                              mode=disk["mode"])
       disks.append(disk_dev)
   else:
     raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
@@ -3948,15 +4063,15 @@ def _RemoveDisks(lu, instance):
   """
   logging.info("Removing block devices for instance %s", instance.name)

-  result = True
+  all_result = True
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(disk, node)
-      result = lu.rpc.call_blockdev_remove(node, disk)
-      if result.failed or not result.data:
-        lu.proc.LogWarning("Could not remove block device %s on node %s,"
-                           " continuing anyway", device.iv_name, node)
-        result = False
+      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+      if msg:
+        lu.LogWarning("Could not remove block device %s on node %s,"
+                      " continuing anyway: %s", device.iv_name, node, msg)
+        all_result = False

   if instance.disk_template == constants.DT_FILE:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
@@ -3964,9 +4079,9 @@ def _RemoveDisks(lu, instance):
                                                  file_storage_dir)
     if result.failed or not result.data:
       logging.error("Could not remove directory '%s'", file_storage_dir)
-      result = False
+      all_result = False

-  return result
+  return all_result


 def _ComputeDiskSize(disk_template, disks):
@@ -4011,13 +4126,12 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
                                                   hvparams)
   for node in nodenames:
     info = hvinfo[node]
-    info.Raise()
-    if not info.data or not isinstance(info.data, (tuple, list)):
-      raise errors.OpPrereqError("Cannot get current information"
-                                 " from node '%s' (%s)" % (node, info.data))
-    if not info.data[0]:
+    if info.offline:
+      continue
+    msg = info.RemoteFailMsg()
+    if msg:
       raise errors.OpPrereqError("Hypervisor parameter validation failed:"
-                                 " %s" % info.data[1])
+                                 " %s" % msg)


 class LUCreateInstance(LogicalUnit):
@@ -4124,7 +4238,9 @@ class LUCreateInstance(LogicalUnit):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
       # bridge verification
-      bridge = nic.get("bridge", self.cfg.GetDefBridge())
+      bridge = nic.get("bridge", None)
+      if bridge is None:
+        bridge = self.cfg.GetDefBridge()
       self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

     # disk checks/pre-build
@@ -4251,7 +4367,7 @@ class LUCreateInstance(LogicalUnit):
     env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
       primary_node=self.op.pnode,
       secondary_nodes=self.secondaries,
-      status=self.instance_status,
+      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
@@ -4426,11 +4542,6 @@ class LUCreateInstance(LogicalUnit):
                          self.be_full[constants.BE_MEMORY],
                          self.op.hypervisor)

-    if self.op.start:
-      self.instance_status = 'up'
-    else:
-      self.instance_status = 'down'
-
   def Exec(self, feedback_fn):
     """Create and add the instance to the cluster.
@@ -4476,7 +4587,7 @@ class LUCreateInstance(LogicalUnit):
                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
-                            status=self.instance_status,
+                            admin_up=False,
                             network_port=network_port,
                             beparams=self.op.beparams,
                             hvparams=self.op.hvparams,
@@ -4500,8 +4611,6 @@ class LUCreateInstance(LogicalUnit):
     # Declare that we don't want to remove the instance lock anymore, as we've
     # added the instance to the config
     del self.remove_locks[locking.LEVEL_INSTANCE]
-    # Remove the temp. assignements for the instance's drbds
-    self.cfg.ReleaseDRBDMinors(instance)
     # Unlock all the nodes
     if self.op.mode == constants.INSTANCE_IMPORT:
       nodes_keep = [self.op.src_node]
@@ -4564,6 +4673,8 @@ class LUCreateInstance(LogicalUnit):
                                    % self.op.mode)

     if self.op.start:
+      iobj.admin_up = True
+      self.cfg.Update(iobj)
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
       result = self.rpc.call_instance_start(pnode_name, iobj, None)
@@ -4614,7 +4725,12 @@ class LUConnectConsole(NoHooksLU):
     logging.debug("Connecting to console of %s on %s", instance.name, node)

     hyper = hypervisor.GetHypervisor(instance.hypervisor)
-    console_cmd = hyper.GetShellCommandForConsole(instance)
+    cluster = self.cfg.GetClusterInfo()
+    # beparams and hvparams are passed separately, to avoid editing the
+    # instance and then saving the defaults in the instance itself.
+    hvparams = cluster.FillHV(instance)
+    beparams = cluster.FillBE(instance)
+    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)

     # build ssh cmdline
     return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
@@ -4662,6 +4778,10 @@ class LUReplaceDisks(LogicalUnit):
         raise errors.OpPrereqError("Node '%s' not known" %
                                    self.op.remote_node)
       self.op.remote_node = remote_node
+      # Warning: do not remove the locking of the new secondary here
+      # unless DRBD8.AddChildren is changed to work in parallel;
+      # currently it doesn't since parallel invocations of
+      # FindUnusedMinor will conflict
       self.needed_locks[locking.LEVEL_NODE] = [remote_node]
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
     else:
@@ -4829,9 +4949,13 @@ class LUReplaceDisks(LogicalUnit):
       for node in tgt_node, oth_node:
         info("checking disk/%d on %s" % (idx, node))
         cfg.SetDiskID(dev, node)
-        if not self.rpc.call_blockdev_find(node, dev):
-          raise errors.OpExecError("Can't find disk/%d on node %s" %
-                                   (idx, node))
+        result = self.rpc.call_blockdev_find(node, dev)
+        msg = result.RemoteFailMsg()
+        if not msg and not result.payload:
+          msg = "disk not found"
+        if msg:
+          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+                                   (idx, node, msg))

     # Step: check other node consistency
     self.proc.LogStep(2, steps_total, "check peer consistency")
@@ -4894,8 +5018,9 @@ class LUReplaceDisks(LogicalUnit):
       # build the rename list based on what LVs exist on the node
       rlist = []
       for to_ren in old_lvs:
-        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
-        if not find_res.failed and find_res.data is not None: # device exists
+        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
+        if not result.RemoteFailMsg() and result.payload:
+          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

       info("renaming the old LVs on the target node")
@@ -4924,10 +5049,10 @@ class LUReplaceDisks(LogicalUnit):
       result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
       if result.failed or not result.data:
         for new_lv in new_lvs:
-          result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
-          if result.failed or not result.data:
-            warning("Can't rollback device %s", hint="manually cleanup unused"
-                    " logical volumes")
+          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
+          if msg:
+            warning("Can't rollback device %s: %s", dev, msg,
+                    hint="cleanup manually the unused logical volumes")
         raise errors.OpExecError("Can't add local storage to drbd")

       dev.children = new_lvs
@@ -4945,7 +5070,13 @@ class LUReplaceDisks(LogicalUnit):
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
       cfg.SetDiskID(dev, instance.primary_node)
       result = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      if result.failed or result.data[5]:
+      msg = result.RemoteFailMsg()
+      if not msg and not result.payload:
+        msg = "disk not found"
+      if msg:
+        raise errors.OpExecError("Can't find DRBD device %s: %s" %
+                                 (name, msg))
+      if result.payload[5]:
         raise errors.OpExecError("DRBD device %s is degraded!" % name)

     # Step: remove old storage
@@ -4954,9 +5085,10 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
         cfg.SetDiskID(lv, tgt_node)
-        result = self.rpc.call_blockdev_remove(tgt_node, lv)
-        if result.failed or not result.data:
-          warning("Can't remove old LV", hint="manually remove unused LVs")
+        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
+        if msg:
+          warning("Can't remove old LV: %s" % msg,
+                  hint="manually remove unused LVs")
           continue

   def _ExecD8Secondary(self, feedback_fn):
@@ -5009,10 +5141,12 @@ class LUReplaceDisks(LogicalUnit):
       info("checking disk/%d on %s" % (idx, pri_node))
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      result.Raise()
-      if not result.data:
-        raise errors.OpExecError("Can't find disk/%d on node %s" %
-                                 (idx, pri_node))
+      msg = result.RemoteFailMsg()
+      if not msg and not result.payload:
+        msg = "disk not found"
+      if msg:
+        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+                                 (idx, pri_node, msg))

     # Step: check other node consistency
     self.proc.LogStep(2, steps_total, "check peer consistency")
@@ -5075,9 +5209,10 @@ class LUReplaceDisks(LogicalUnit):
       # we have new devices, shutdown the drbd on the old secondary
       info("shutting down drbd for disk/%d on old node" % idx)
       cfg.SetDiskID(dev, old_node)
-      result = self.rpc.call_blockdev_shutdown(old_node, dev)
-      if result.failed or not result.data:
-        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
+      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
+      if msg:
+        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
+                (idx, msg),
                 hint="Please cleanup this device manually as soon as possible")

     info("detaching primary drbds from the network (=> standalone)")
@@ -5098,9 +5233,6 @@ class LUReplaceDisks(LogicalUnit):
       dev.logical_id = new_logical_id
       cfg.SetDiskID(dev, pri_node)
     cfg.Update(instance)
-    # we can remove now the temp minors as now the new values are
-    # written to the config file (and therefore stable)
-    self.cfg.ReleaseDRBDMinors(instance.name)

     # and now perform the drbd attach
     info("attaching primary drbds to new secondary (standalone => connected)")
@@ -5124,8 +5256,13 @@ class LUReplaceDisks(LogicalUnit):
     for idx, (dev, old_lvs, _) in iv_names.iteritems():
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      result.Raise()
-      if result.data[5]:
+      msg = result.RemoteFailMsg()
+      if not msg and not result.payload:
+        msg = "disk not found"
+      if msg:
+        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
+                                 (idx, msg))
+      if result.payload[5]:
         raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

     self.proc.LogStep(6, steps_total, "removing old storage")
@@ -5133,9 +5270,9 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for disk/%d" % idx)
       for lv in old_lvs:
         cfg.SetDiskID(lv, old_node)
-        result = self.rpc.call_blockdev_remove(old_node, lv)
-        if result.failed or not result.data:
-          warning("Can't remove LV on old secondary",
+        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
+        if msg:
+          warning("Can't remove LV on old secondary: %s", msg,
                   hint="Cleanup stale volumes by hand")

   def Exec(self, feedback_fn):
@@ -5147,7 +5284,7 @@ class LUReplaceDisks(LogicalUnit):
     instance = self.instance

     # Activate the instance disks if we're replacing them on a down instance
-    if instance.status == "down":
+    if not instance.admin_up:
       _StartInstanceDisks(self, instance, True)

     if self.op.mode == constants.REPLACE_DISK_CHG:
@@ -5158,7 +5295,7 @@ class LUReplaceDisks(LogicalUnit):
     ret = fn(feedback_fn)

     # Deactivate the instance disks if we're replacing them on a down instance
-    if instance.status == "down":
+    if not instance.admin_up:
       _SafeShutdownInstanceDisks(self, instance)

     return ret
@@ -5246,13 +5383,10 @@ class LUGrowDisk(LogicalUnit):
     for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
       result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
-      result.Raise()
-      if (not result.data or not isinstance(result.data, (list, tuple)) or
-          len(result.data) != 2):
-        raise errors.OpExecError("Grow request failed to node %s" % node)
-      elif not result.data[0]:
+      msg = result.RemoteFailMsg()
+      if msg:
         raise errors.OpExecError("Grow request failed to node %s: %s" %
-                                 (node, result.data[1]))
+                                 (node, msg))
     disk.RecordGrow(self.op.amount)
     self.cfg.Update(instance)
     if self.op.wait_for_sync:
@@ -5316,8 +5450,11 @@ class LUQueryInstanceData(NoHooksLU):
     if not static:
       self.cfg.SetDiskID(dev, instance.primary_node)
       dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      dev_pstatus.Raise()
-      dev_pstatus = dev_pstatus.data
+      msg = dev_pstatus.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Can't compute disk status for %s: %s" %
+                                 (instance.name, msg))
+      dev_pstatus = dev_pstatus.payload
     else:
       dev_pstatus = None
@@ -5331,8 +5468,11 @@ class LUQueryInstanceData(NoHooksLU):
     if snode and not static:
       self.cfg.SetDiskID(dev, snode)
       dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
-      dev_sstatus.Raise()
-      dev_sstatus = dev_sstatus.data
+      msg = dev_sstatus.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Can't compute disk status for %s: %s" %
+                                 (instance.name, msg))
+      dev_sstatus = dev_sstatus.payload
     else:
       dev_sstatus = None
@@ -5374,10 +5514,10 @@ class LUQueryInstanceData(NoHooksLU):
           remote_state = "down"
       else:
         remote_state = None
-      if instance.status == "down":
-        config_state = "down"
-      else:
+      if instance.admin_up:
         config_state = "up"
+      else:
+        config_state = "down"

       disks = [self._ComputeDiskStatus(instance, None, device)
                for device in instance.disks]
@@ -5442,7 +5582,7 @@ class LUSetInstanceParams(LogicalUnit):
           raise errors.OpPrereqError("Invalid disk index")
       if disk_op == constants.DDM_ADD:
         mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
-        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
+        if mode not in constants.DISK_ACCESS_SET:
           raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
         size = disk_dict.get('size', None)
         if size is None:
@@ -5697,10 +5837,10 @@ class LUSetInstanceParams(LogicalUnit):
         device_idx = len(instance.disks)
         for node, disk in device.ComputeNodeTree(instance.primary_node):
           self.cfg.SetDiskID(disk, node)
-          rpc_result = self.rpc.call_blockdev_remove(node, disk)
-          if rpc_result.failed or not rpc_result.data:
-            self.proc.LogWarning("Could not remove disk/%d on node %s,"
-                                 " continuing anyway", device_idx, node)
+          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+          if msg:
+            self.LogWarning("Could not remove disk/%d on node %s: %s,"
+                            " continuing anyway", device_idx, node, msg)
         result.append(("disk/%d" % device_idx, "remove"))
       elif disk_op == constants.DDM_ADD:
         # add a new disk
@@ -5718,7 +5858,6 @@ class LUSetInstanceParams(LogicalUnit):
                                          file_path,
                                          file_driver,
                                          disk_idx_base)[0]
-        new_disk.mode = disk_dict['mode']
         instance.disks.append(new_disk)
         info = _GetInstanceInfoText(instance)
@@ -5770,7 +5909,7 @@ class LUSetInstanceParams(LogicalUnit):

     # hvparams changes
     if self.op.hvparams:
-      instance.hvparams = self.hv_new
+      instance.hvparams = self.hv_inst
       for key, val in self.op.hvparams.iteritems():
         result.append(("hv/%s" % key, val))
@@ -5933,7 +6072,7 @@ class LUExportInstance(LogicalUnit):
           snap_disks.append(new_dev)

     finally:
-      if self.op.shutdown and instance.status == "up":
+      if self.op.shutdown and instance.admin_up:
         result = self.rpc.call_instance_start(src_node, instance, None)
         msg = result.RemoteFailMsg()
         if msg:
@@ -5951,10 +6090,10 @@ class LUExportInstance(LogicalUnit):
           self.LogWarning("Could not export block device %s from node %s to"
                           " node %s", dev.logical_id[1], src_node,
                           dst_node.name)
-        result = self.rpc.call_blockdev_remove(src_node, dev)
-        if result.failed or not result.data:
+        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+        if msg:
           self.LogWarning("Could not remove snapshot block device %s from node"
-                          " %s", dev.logical_id[1], src_node)
+                          " %s: %s", dev.logical_id[1], src_node, msg)

     result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
     if result.failed or not result.data:
@@ -6309,7 +6448,7 @@ class IAllocator(object):
       "version": 1,
       "cluster_name": cfg.GetClusterName(),
       "cluster_tags": list(cluster_info.GetTags()),
-      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
+      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
       # we don't have job IDs
       }
     iinfo = cfg.GetAllInstancesInfo().values()
@@ -6328,52 +6467,61 @@ class IAllocator(object):
                                          hypervisor_name)
     node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                                        cluster_info.enabled_hypervisors)
-    for nname in node_list:
+    for nname, nresult in node_data.items():
+      # first fill in static (config-based) values
       ninfo = cfg.GetNodeInfo(nname)
-      node_data[nname].Raise()
-      if not isinstance(node_data[nname].data, dict):
-        raise errors.OpExecError("Can't get data for node %s" % nname)
-      remote_info = node_data[nname].data
-      for attr in ['memory_total', 'memory_free', 'memory_dom0',
-                   'vg_size', 'vg_free', 'cpu_total']:
-        if attr not in remote_info:
-          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
-                                   (nname, attr))
-        try:
-          remote_info[attr] = int(remote_info[attr])
-        except ValueError, err:
-          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
-                                   " %s" % (nname, attr, str(err)))
-      # compute memory used by primary instances
-      i_p_mem = i_p_up_mem = 0
-      for iinfo, beinfo in i_list:
-        if iinfo.primary_node == nname:
-          i_p_mem += beinfo[constants.BE_MEMORY]
-          if iinfo.name not in node_iinfo[nname]:
-            i_used_mem = 0
-          else:
-            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
-          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
-          remote_info['memory_free'] -= max(0, i_mem_diff)
-
-          if iinfo.status == "up":
-            i_p_up_mem += beinfo[constants.BE_MEMORY]
-
-      # compute memory used by instances
       pnr = {
         "tags": list(ninfo.GetTags()),
-        "total_memory": remote_info['memory_total'],
-        "reserved_memory": remote_info['memory_dom0'],
-        "free_memory": remote_info['memory_free'],
-        "i_pri_memory": i_p_mem,
-        "i_pri_up_memory": i_p_up_mem,
-        "total_disk": remote_info['vg_size'],
-        "free_disk": remote_info['vg_free'],
         "primary_ip": ninfo.primary_ip,
         "secondary_ip": ninfo.secondary_ip,
-        "total_cpus": remote_info['cpu_total'],
         "offline": ninfo.offline,
+        "drained": ninfo.drained,
+        "master_candidate": ninfo.master_candidate,
         }
+
+      if not ninfo.offline:
+        nresult.Raise()
+        if not isinstance(nresult.data, dict):
+          raise errors.OpExecError("Can't get data for node %s" % nname)
+        remote_info = nresult.data
+        for attr in ['memory_total', 'memory_free', 'memory_dom0',
+                     'vg_size', 'vg_free', 'cpu_total']:
+          if attr not in remote_info:
+            raise errors.OpExecError("Node '%s' didn't return attribute"
+                                     " '%s'" % (nname, attr))
+          try:
+            remote_info[attr] = int(remote_info[attr])
+          except ValueError, err:
+            raise errors.OpExecError("Node '%s' returned invalid value"
+                                     " for '%s': %s" % (nname, attr, err))
+        # compute memory used by primary instances
+        i_p_mem = i_p_up_mem = 0
+        for iinfo, beinfo in i_list:
+          if iinfo.primary_node == nname:
+            i_p_mem += beinfo[constants.BE_MEMORY]
+            if iinfo.name not in node_iinfo[nname].data:
+              i_used_mem = 0
+            else:
+              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
+            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
+            remote_info['memory_free'] -= max(0, i_mem_diff)
+
+            if iinfo.admin_up:
+              i_p_up_mem += beinfo[constants.BE_MEMORY]
+
+        # compute memory used by instances
+        pnr_dyn = {
+          "total_memory": remote_info['memory_total'],
+          "reserved_memory": remote_info['memory_dom0'],
+          "free_memory": remote_info['memory_free'],
+          "total_disk": remote_info['vg_size'],
+          "free_disk": remote_info['vg_free'],
+          "total_cpus": remote_info['cpu_total'],
+          "i_pri_memory": i_p_mem,
+          "i_pri_up_memory": i_p_up_mem,
+          }
+        pnr.update(pnr_dyn)
+
       node_results[nname] = pnr

     data["nodes"] = node_results
@@ -6384,13 +6532,13 @@ class IAllocator(object):
                   for n in iinfo.nics]
       pir = {
         "tags": list(iinfo.GetTags()),
-        "should_run": iinfo.status == "up",
+        "admin_up": iinfo.admin_up,
         "vcpus": beinfo[constants.BE_VCPUS],
         "memory": beinfo[constants.BE_MEMORY],
         "os": iinfo.os,
-        "nodes": list(iinfo.all_nodes),
+        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
         "nics": nic_data,
-        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
+        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
         "disk_template": iinfo.disk_template,
         "hypervisor": iinfo.hypervisor,
         }
@@ -6411,8 +6559,6 @@ class IAllocator(object):

     """
     data = self.in_data
-    if len(self.disks) != 2:
-      raise errors.OpExecError("Only two-disk configurations supported")

     disk_space = _ComputeDiskSize(self.disk_template, self.disks)
@@ -6569,8 +6715,6 @@ class LUTestAllocator(NoHooksLU):
                                    " 'nics' parameter")
       if not isinstance(self.op.disks, list):
         raise errors.OpPrereqError("Invalid parameter 'disks'")
-      if len(self.op.disks) != 2:
-        raise errors.OpPrereqError("Only two-disk configurations supported")
       for row in self.op.disks:
         if (not isinstance(row, dict) or
             "size" not in row or
@@ -6579,7 +6723,7 @@ class LUTestAllocator(NoHooksLU):
             row["mode"] not in ['r', 'w']):
           raise errors.OpPrereqError("Invalid contents of the"
                                      " 'disks' parameter")
-      if self.op.hypervisor is None:
+      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
         self.op.hypervisor = self.cfg.GetHypervisorType()
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
       if not hasattr(self.op, "name"):
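
Note on the recurring pattern: most hunks above convert RPC call sites from checking result.failed / result.data to the RemoteFailMsg() / payload idiom. The following is an illustrative sketch only, not part of the patch; it assumes the RpcResult interface as used in the hunks (RemoteFailMsg() returning a non-empty error string on failure, payload carrying the result), and the helper name _FindDiskOrFail is hypothetical.

    # Sketch of the converted error-handling idiom (assumptions noted above).
    from ganeti import errors

    def _FindDiskOrFail(lu, node, dev):
      """Return the blockdev_find payload for dev on node, or raise.

      Mirrors the call-site pattern introduced by this patch.

      """
      result = lu.rpc.call_blockdev_find(node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        # the RPC itself succeeded, but the device was not found
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk on node %s: %s" %
                                 (node, msg))
      return result.payload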