X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/9c24736cae9056e7e38bf2f92eea496225d8d158..cfda0e48eb3238187dbb74bb56d54ca87747684c:/lib/cmdlib.py

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 75f0ac4..fa8716e 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -453,15 +453,19 @@ class _QueryBase:
   #: Attribute holding field definitions
   FIELDS = None
 
-  def __init__(self, names, fields, use_locking):
+  def __init__(self, filter_, fields, use_locking):
     """Initializes this class.
 
     """
-    self.names = names
     self.use_locking = use_locking
 
-    self.query = query.Query(self.FIELDS, fields)
+    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
+                             namefield="name")
     self.requested_data = self.query.RequestedData()
+    self.names = self.query.RequestedNames()
+
+    # Sort only if no names were requested
+    self.sort_by_name = not self.names
 
     self.do_locking = None
     self.wanted = None
@@ -492,15 +496,6 @@ class _QueryBase:
     # Return expanded names
     return self.wanted
 
-  @classmethod
-  def FieldsQuery(cls, fields):
-    """Returns list of available fields.
-
-    @return: List of L{objects.QueryFieldDefinition}
-
-    """
-    return query.QueryFields(cls.FIELDS, fields)
-
   def ExpandNames(self, lu):
     """Expand names for this query.
 
@@ -529,13 +524,15 @@ class _QueryBase:
     """Collect data and execute query.
 
     """
-    return query.GetQueryResponse(self.query, self._GetQueryData(lu))
+    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
+                                  sort_by_name=self.sort_by_name)
 
   def OldStyleQuery(self, lu):
     """Collect data and execute query.
 
     """
-    return self.query.OldStyleQuery(self._GetQueryData(lu))
+    return self.query.OldStyleQuery(self._GetQueryData(lu),
+                                    sort_by_name=self.sort_by_name)
 
 
 def _GetWantedNodes(lu, nodes):
@@ -609,6 +606,18 @@ def _GetUpdatedParams(old_params, update_dict,
   return params_copy
 
 
+def _RunPostHook(lu, node_name):
+  """Runs the post-hook for an opcode on a single node.
+
+  """
+  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
+  try:
+    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
+  except:
+    # pylint: disable-msg=W0702
+    lu.LogWarning("Errors occurred running hooks on %s" % node_name)
+
+
 def _CheckOutputFields(static, dynamic, selected):
   """Checks whether all selected fields are valid.
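The _QueryBase constructor above now takes a query filter instead of a plain list of names; later hunks in this patch build such filters with qlang.MakeSimpleFilter. As a rough sketch (assuming the OP_OR/OP_EQUAL operator constants from lib/qlang.py), a name list translates into a filter along these lines:

# Sketch only: approximates what qlang.MakeSimpleFilter("name", names) is
# expected to return; the real helper lives in lib/qlang.py.
def _MakeNameFilter(names):
  if not names:
    return None  # no filter: the query returns all items
  # e.g. ["node1", "node2"] ->
  #   [OP_OR, [OP_EQUAL, "name", "node1"], [OP_EQUAL, "name", "node2"]]
  return [qlang.OP_OR] + [[qlang.OP_EQUAL, "name", name] for name in names]

When no names are requested, RequestedNames() presumably returns None, so sort_by_name stays enabled, which is what the new constructor relies on.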
@@ -1035,7 +1044,7 @@ def _GetStorageTypeArgs(cfg, storage_type):
   # Special case for file storage
   if storage_type == constants.ST_FILE:
     # storage.FileStorage wants a list of storage directories
-    return [[cfg.GetFileStorageDir()]]
+    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
 
   return []
 
@@ -1153,12 +1162,7 @@ class LUClusterDestroy(LogicalUnit):
     master = self.cfg.GetMasterNode()
 
     # Run post hooks on master node before it's removed
-    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
-    try:
-      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
-    except:
-      # pylint: disable-msg=W0702
-      self.LogWarning("Errors occurred running hooks on %s" % master)
+    _RunPostHook(self, master)
 
     result = self.rpc.call_node_stop_master(master, False)
     result.Raise("Could not disable the master role")
@@ -1254,8 +1258,8 @@ class LUClusterVerify(LogicalUnit):
     @ivar instances: a list of running instances (runtime)
     @ivar pinst: list of configured primary instances (config)
     @ivar sinst: list of configured secondary instances (config)
-    @ivar sbp: diction of {secondary-node: list of instances} of all peers
-        of this node (config)
+    @ivar sbp: dictionary of {primary-node: list of instances} for all
+        instances for which this node is secondary (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
@@ -1617,6 +1621,7 @@ class LUClusterVerify(LogicalUnit):
         instances it was primary for.
 
     """
+    cluster_info = self.cfg.GetClusterInfo()
     for node, n_img in node_image.items():
       # This code checks that every node which is now listed as
       # secondary has enough memory to host all instances it is
@@ -1635,7 +1640,7 @@ class LUClusterVerify(LogicalUnit):
       for prinode, instances in n_img.sbp.items():
         needed_mem = 0
         for instance in instances:
-          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
+          bep = cluster_info.FillBE(instance_cfg[instance])
           if bep[constants.BE_AUTO_BALANCE]:
             needed_mem += bep[constants.BE_MEMORY]
         test = n_img.mfree < needed_mem
@@ -2316,7 +2321,7 @@ class LUClusterVerify(LogicalUnit):
                utils.CommaJoin(inst_config.secondary_nodes),
                code=self.ETYPE_WARNING)
 
-      if inst_config.disk_template in constants.DTS_NET_MIRROR:
+      if inst_config.disk_template in constants.DTS_INT_MIRROR:
         pnode = inst_config.primary_node
         instance_nodes = utils.NiceSort(inst_config.all_nodes)
         instance_groups = {}
@@ -2584,16 +2589,18 @@ class LUClusterRepairDiskSizes(NoHooksLU):
       newl = [v[2].Copy() for v in dskl]
       for dsk in newl:
         self.cfg.SetDiskID(dsk, node)
-      result = self.rpc.call_blockdev_getsizes(node, newl)
+      result = self.rpc.call_blockdev_getsize(node, newl)
       if result.fail_msg:
-        self.LogWarning("Failure in blockdev_getsizes call to node"
+        self.LogWarning("Failure in blockdev_getsize call to node"
                         " %s, ignoring", node)
         continue
-      if len(result.data) != len(dskl):
+      if len(result.payload) != len(dskl):
+        logging.warning("Invalid result from node %s: len(dksl)=%d,"
+                        " result.payload=%s", node, len(dskl), result.payload)
         self.LogWarning("Invalid result from node %s, ignoring node results",
                         node)
         continue
-      for ((instance, idx, disk), size) in zip(dskl, result.data):
+      for ((instance, idx, disk), size) in zip(dskl, result.payload):
         if size is None:
           self.LogWarning("Disk %d of instance %s did not return size"
                           " information, ignoring", idx, instance.name)
@@ -3225,6 +3232,7 @@ class LUOobCommand(NoHooksLU):
 
   """
   REG_BGL = False
+  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -3237,6 +3245,36 @@
     """
     self.nodes = []
+    self.master_node = self.cfg.GetMasterNode()
+
+    assert self.op.power_delay >= 0.0
+
+    if self.op.node_names:
+      if self.op.command in self._SKIP_MASTER:
+        if self.master_node in self.op.node_names:
+          master_node_obj = self.cfg.GetNodeInfo(self.master_node)
+          master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
+
+          if master_oob_handler:
+            additional_text = ("Run '%s %s %s' if you want to operate on the"
+                               " master regardless") % (master_oob_handler,
+                                                        self.op.command,
+                                                        self.master_node)
+          else:
+            additional_text = "The master node does not support out-of-band"
+
+          raise errors.OpPrereqError(("Operating on the master node %s is not"
+                                      " allowed for %s\n%s") %
+                                     (self.master_node, self.op.command,
+                                      additional_text), errors.ECODE_INVAL)
+    else:
+      self.op.node_names = self.cfg.GetNodeList()
+      if self.op.command in self._SKIP_MASTER:
+        self.op.node_names.remove(self.master_node)
+
+    if self.op.command in self._SKIP_MASTER:
+      assert self.master_node not in self.op.node_names
+
     for node_name in self.op.node_names:
       node = self.cfg.GetNodeInfo(node_name)
 
@@ -3246,7 +3284,8 @@ class LUOobCommand(NoHooksLU):
       else:
         self.nodes.append(node)
 
-      if (self.op.command == constants.OOB_POWER_OFF and not node.offline):
+      if (not self.op.ignore_status and
+          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
         raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                     " not marked offline") % node_name,
                                    errors.ECODE_STATE)
@@ -3258,21 +3297,22 @@ class LUOobCommand(NoHooksLU):
     if self.op.node_names:
       self.op.node_names = [_ExpandNodeName(self.cfg, name)
                             for name in self.op.node_names]
+      lock_names = self.op.node_names
     else:
-      self.op.node_names = self.cfg.GetNodeList()
+      lock_names = locking.ALL_SET
 
     self.needed_locks = {
-      locking.LEVEL_NODE: self.op.node_names,
+      locking.LEVEL_NODE: lock_names,
       }
 
   def Exec(self, feedback_fn):
     """Execute OOB and return result if we expect any.
 
     """
-    master_node = self.cfg.GetMasterNode()
+    master_node = self.master_node
     ret = []
 
-    for node in self.nodes:
+    for idx, node in enumerate(self.nodes):
       node_entry = [(constants.RS_NORMAL, node.name)]
       ret.append(node_entry)
 
@@ -3326,6 +3366,10 @@ class LUOobCommand(NoHooksLU):
 
           node_entry.append((constants.RS_NORMAL, result.payload))
 
+        if (self.op.command == constants.OOB_POWER_ON and
+            idx < len(self.nodes) - 1):
+          time.sleep(self.op.power_delay)
+
     return ret
 
   def _CheckPayload(self, result):
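The Exec() hunk above paces OOB power-on commands with the new power_delay parameter, sleeping between nodes but not after the last one. A minimal standalone sketch of the same pattern (hypothetical helper names, not part of the patch):

import time

def _PowerOnWithDelay(nodes, power_on_fn, delay):
  """Power on nodes one at a time, sleeping `delay` seconds between them."""
  for idx, node in enumerate(nodes):
    power_on_fn(node)
    # Mirrors the idx < len(self.nodes) - 1 check above: no sleep after
    # the final node.
    if idx < len(nodes) - 1:
      time.sleep(delay)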
- - """ - REQ_BGL = False - _HID = "hidden" - _BLK = "blacklisted" - _VLD = "valid" - _FIELDS_STATIC = utils.FieldSet() - _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants", - "parameters", "api_versions", _HID, _BLK) - - def CheckArguments(self): - if self.op.names: - raise errors.OpPrereqError("Selective OS query not supported", - errors.ECODE_INVAL) - - _CheckOutputFields(static=self._FIELDS_STATIC, - dynamic=self._FIELDS_DYNAMIC, - selected=self.op.output_fields) - - def ExpandNames(self): - # Lock all nodes, in shared mode + def ExpandNames(self, lu): + # Lock all nodes in shared mode # Temporary removal of locks, should be reverted later # TODO: reintroduce locks when they are lighter-weight - self.needed_locks = {} + lu.needed_locks = {} #self.share_locks[locking.LEVEL_NODE] = 1 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET + # The following variables interact with _QueryBase._GetNames + if self.names: + self.wanted = self.names + else: + self.wanted = locking.ALL_SET + + self.do_locking = self.use_locking + + def DeclareLocks(self, lu, level): + pass + @staticmethod def _DiagnoseByOS(rlist): """Remaps a per-node return list into an a per-os per-node dictionary @@ -3435,71 +3470,100 @@ class LUOsDiagnose(NoHooksLU): variants, params, api_versions)) return all_os - def Exec(self, feedback_fn): - """Compute the list of OSes. + def _GetQueryData(self, lu): + """Computes the list of nodes and their attributes. """ + # Locking is not used + assert not (lu.acquired_locks or self.do_locking or self.use_locking) + valid_nodes = [node.name - for node in self.cfg.GetAllNodesInfo().values() + for node in lu.cfg.GetAllNodesInfo().values() if not node.offline and node.vm_capable] - node_data = self.rpc.call_os_diagnose(valid_nodes) - pol = self._DiagnoseByOS(node_data) - output = [] - cluster = self.cfg.GetClusterInfo() + pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes)) + cluster = lu.cfg.GetClusterInfo() + + data = {} + + for (os_name, os_data) in pol.items(): + info = query.OsInfo(name=os_name, valid=True, node_status=os_data, + hidden=(os_name in cluster.hidden_os), + blacklisted=(os_name in cluster.blacklisted_os)) + + variants = set() + parameters = set() + api_versions = set() - for os_name in utils.NiceSort(pol.keys()): - os_data = pol[os_name] - row = [] - valid = True - (variants, params, api_versions) = null_state = (set(), set(), set()) for idx, osl in enumerate(os_data.values()): - valid = bool(valid and osl and osl[0][1]) - if not valid: - (variants, params, api_versions) = null_state + info.valid = bool(info.valid and osl and osl[0][1]) + if not info.valid: break - node_variants, node_params, node_api = osl[0][3:6] - if idx == 0: # first entry - variants = set(node_variants) - params = set(node_params) - api_versions = set(node_api) - else: # keep consistency + + (node_variants, node_params, node_api) = osl[0][3:6] + if idx == 0: + # First entry + variants.update(node_variants) + parameters.update(node_params) + api_versions.update(node_api) + else: + # Filter out inconsistent values variants.intersection_update(node_variants) - params.intersection_update(node_params) + parameters.intersection_update(node_params) api_versions.intersection_update(node_api) - is_hid = os_name in cluster.hidden_os - is_blk = os_name in cluster.blacklisted_os - if ((self._HID not in self.op.output_fields and is_hid) or - (self._BLK not in self.op.output_fields and is_blk) or - (self._VLD not in self.op.output_fields and not valid)): - continue + 
+      info.variants = list(variants)
+      info.parameters = list(parameters)
+      info.api_versions = list(api_versions)
 
-      for field in self.op.output_fields:
-        if field == "name":
-          val = os_name
-        elif field == self._VLD:
-          val = valid
-        elif field == "node_status":
-          # this is just a copy of the dict
-          val = {}
-          for node_name, nos_list in os_data.items():
-            val[node_name] = nos_list
-        elif field == "variants":
-          val = utils.NiceSort(list(variants))
-        elif field == "parameters":
-          val = list(params)
-        elif field == "api_versions":
-          val = list(api_versions)
-        elif field == self._HID:
-          val = is_hid
-        elif field == self._BLK:
-          val = is_blk
-        else:
-          raise errors.ParameterError(field)
-        row.append(val)
-      output.append(row)
+      data[os_name] = info
+
+    # Prepare data in requested order
+    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+            if name in data]
 
-    return output
+
+class LUOsDiagnose(NoHooksLU):
+  """Logical unit for OS diagnose/query.
+
+  """
+  REQ_BGL = False
+
+  @staticmethod
+  def _BuildFilter(fields, names):
+    """Builds a filter for querying OSes.
+
+    """
+    name_filter = qlang.MakeSimpleFilter("name", names)
+
+    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
+    # respective field is not requested
+    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
+                     for fname in ["hidden", "blacklisted"]
+                     if fname not in fields]
+    if "valid" not in fields:
+      status_filter.append([qlang.OP_TRUE, "valid"])
+
+    if status_filter:
+      status_filter.insert(0, qlang.OP_AND)
+    else:
+      status_filter = None
+
+    if name_filter and status_filter:
+      return [qlang.OP_AND, name_filter, status_filter]
+    elif name_filter:
+      return name_filter
+    else:
+      return status_filter
+
+  def CheckArguments(self):
+    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
+                       self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.oq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.oq.OldStyleQuery(self)
 
 
 class LUNodeRemove(LogicalUnit):
@@ -3575,12 +3639,7 @@ class LUNodeRemove(LogicalUnit):
     self.context.RemoveNode(node.name)
 
     # Run post hooks on the node before it's removed
-    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
-    try:
-      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
-    except:
-      # pylint: disable-msg=W0702
-      self.LogWarning("Errors occurred running hooks on %s" % node.name)
+    _RunPostHook(self, node.name)
 
     result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
     msg = result.fail_msg
@@ -3682,8 +3741,8 @@ class LUNodeQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.nq = _NodeQuery(self.op.names, self.op.output_fields,
-                         self.op.use_locking)
+    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                         self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.nq.ExpandNames(self)
@@ -3951,9 +4010,8 @@ class LUQuery(NoHooksLU):
 
   def CheckArguments(self):
     qcls = _GetQueryImplementation(self.op.what)
-    names = qlang.ReadSimpleFilter("name", self.op.filter)
 
-    self.impl = qcls(names, self.op.fields, False)
+    self.impl = qcls(self.op.filter, self.op.fields, False)
 
   def ExpandNames(self):
     self.impl.ExpandNames(self)
@@ -3979,7 +4037,7 @@ class LUQueryFields(NoHooksLU):
     self.needed_locks = {}
 
   def Exec(self, feedback_fn):
-    return self.qcls.FieldsQuery(self.op.fields)
+    return query.QueryFields(self.qcls.FIELDS, self.op.fields)
 
 
 class LUNodeModifyStorage(NoHooksLU):
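For a default field list that does not name the "hidden", "blacklisted" or "valid" columns, and with no name arguments, _BuildFilter above should collapse to a pure status filter. A sketch of the expected structure, using the qlang operator constants already referenced in the code:

# Assumed result of LUOsDiagnose._BuildFilter(["name", "variants"], None):
# hide hidden and blacklisted OSes and keep only valid ones.
expected = [qlang.OP_AND,
            [qlang.OP_NOT, [qlang.OP_TRUE, "hidden"]],
            [qlang.OP_NOT, [qlang.OP_TRUE, "blacklisted"]],
            [qlang.OP_TRUE, "valid"]]

If names are given as well, the name filter and the status filter are combined under a single OP_AND, as the final branch of _BuildFilter shows.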
@@ -4056,9 +4114,12 @@ class LUNodeAdd(LogicalUnit):
       "MASTER_CAPABLE": str(self.op.master_capable),
       "VM_CAPABLE": str(self.op.vm_capable),
       }
-    nodes_0 = self.cfg.GetNodeList()
-    nodes_1 = nodes_0 + [self.op.node_name, ]
-    return env, nodes_0, nodes_1
+
+    # Exclude added node
+    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
+    post_nodes = pre_nodes + [self.op.node_name, ]
+
+    return (env, pre_nodes, post_nodes)
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -4355,7 +4416,7 @@ class LUNodeSetParams(LogicalUnit):
       if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
         for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
           instance = self.context.cfg.GetInstanceInfo(instance_name)
-          i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
+          i_mirrored = instance.disk_template in constants.DTS_INT_MIRROR
           if i_mirrored and self.op.node_name in instance.all_nodes:
             instances_keep.append(instance_name)
             self.affected_instances.append(instance)
@@ -4662,6 +4723,7 @@ class LUClusterQuery(NoHooksLU):
       "volume_group_name": cluster.volume_group_name,
       "drbd_usermode_helper": cluster.drbd_usermode_helper,
       "file_storage_dir": cluster.file_storage_dir,
+      "shared_file_storage_dir": cluster.shared_file_storage_dir,
       "maintain_node_health": cluster.maintain_node_health,
       "ctime": cluster.ctime,
       "mtime": cluster.mtime,
@@ -5198,10 +5260,16 @@ class LUInstanceReboot(LogicalUnit):
     ignore_secondaries = self.op.ignore_secondaries
     reboot_type = self.op.reboot_type
 
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    remote_info.Raise("Error checking node %s" % instance.primary_node)
+    instance_running = bool(remote_info.payload)
+
     node_current = instance.primary_node
 
-    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
-                       constants.INSTANCE_REBOOT_HARD]:
+    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                                            constants.INSTANCE_REBOOT_HARD]:
       for disk in instance.disks:
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
@@ -5209,10 +5277,14 @@
                                              self.op.shutdown_timeout)
       result.Raise("Could not reboot instance")
     else:
-      result = self.rpc.call_instance_shutdown(node_current, instance,
-                                               self.op.shutdown_timeout)
-      result.Raise("Could not shutdown instance for full reboot")
-      _ShutdownInstanceDisks(self, instance)
+      if instance_running:
+        result = self.rpc.call_instance_shutdown(node_current, instance,
+                                                 self.op.shutdown_timeout)
+        result.Raise("Could not shutdown instance for full reboot")
+        _ShutdownInstanceDisks(self, instance)
+      else:
+        self.LogInfo("Instance %s was already stopped, starting now",
+                     instance.name)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current, instance, None, None)
       msg = result.fail_msg
@@ -5478,6 +5550,11 @@ class LUInstanceRename(LogicalUnit):
       hostname = netutils.GetHostname(name=new_name)
       self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                    hostname.name)
+      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
+        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
+                                    " same as given hostname '%s'") %
+                                   (hostname.name, self.op.new_name),
+                                   errors.ECODE_INVAL)
       new_name = self.op.new_name = hostname.name
       if (self.op.ip_check and
           netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
@@ -5498,7 +5575,7 @@ class LUInstanceRename(LogicalUnit):
     old_name = inst.name
 
     rename_file_storage = False
-    if (inst.disk_template == constants.DT_FILE and
+    if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
         self.op.new_name != inst.name):
       old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
       rename_file_storage = True
@@ -5628,8 +5705,8 @@ class LUInstanceQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
-                             self.op.use_locking)
+    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                             self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.iq.ExpandNames(self)
@@ -5649,14 +5726,34 @@ class LUInstanceFailover(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.iallocator = getattr(self.op, "iallocator", None)
+    self.target_node = getattr(self.op, "target_node", None)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
+
+    if self.op.target_node is not None:
+      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+
     self.needed_locks[locking.LEVEL_NODE] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
+      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
+      if instance.disk_template in constants.DTS_EXT_MIRROR:
+        if self.op.target_node is None:
+          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+        else:
+          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
+                                                   self.op.target_node]
+        del self.recalculate_locks[locking.LEVEL_NODE]
+      else:
+        self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -5666,15 +5763,19 @@
     """
     instance = self.instance
     source_node = instance.primary_node
-    target_node = instance.secondary_nodes[0]
     env = {
       "IGNORE_CONSISTENCY": self.op.ignore_consistency,
       "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
       "OLD_PRIMARY": source_node,
-      "OLD_SECONDARY": target_node,
-      "NEW_PRIMARY": target_node,
-      "NEW_SECONDARY": source_node,
+      "NEW_PRIMARY": self.op.target_node,
       }
+
+    if instance.disk_template in constants.DTS_INT_MIRROR:
+      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
+      env["NEW_SECONDARY"] = source_node
+    else:
+      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
+
     env.update(_BuildInstanceHookEnvByObject(self, instance))
     nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
     nl_post = list(nl)
@@ -5692,19 +5793,47 @@ class LUInstanceFailover(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
     bep = self.cfg.GetClusterInfo().FillBE(instance)
 
-    if instance.disk_template not in constants.DTS_NET_MIRROR:
+    if instance.disk_template not in constants.DTS_MIRRORED:
       raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " network mirrored, cannot failover.",
+                                 " mirrored, cannot failover.",
                                  errors.ECODE_STATE)
 
-    secondary_nodes = instance.secondary_nodes
-    if not secondary_nodes:
-      raise errors.ProgrammerError("no secondary node but using "
-                                   "a mirrored disk template")
+    if instance.disk_template in constants.DTS_EXT_MIRROR:
+      _CheckIAllocatorOrNode(self, "iallocator", "target_node")
+      if self.op.iallocator:
+        self._RunAllocator()
+        # Release all unnecessary node locks
+        nodes_keep = [instance.primary_node, self.op.target_node]
+        nodes_rel = [node for node in self.acquired_locks[locking.LEVEL_NODE]
+                     if node not in nodes_keep]
+        self.context.glm.release(locking.LEVEL_NODE, nodes_rel)
+        self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
+
+      # self.op.target_node is already populated, either directly or by the
+      # iallocator run
+      target_node = self.op.target_node
 
-    target_node = secondary_nodes[0]
+    else:
+      secondary_nodes = instance.secondary_nodes
+      if not secondary_nodes:
+        raise errors.ConfigurationError("No secondary node but using"
+                                        " %s disk template" %
+                                        instance.disk_template)
+      target_node = secondary_nodes[0]
+
+      if self.op.iallocator or (self.op.target_node and
+                                self.op.target_node != target_node):
+        raise errors.OpPrereqError("Instances with disk template %s cannot"
+                                   " be failed over to arbitrary nodes"
+                                   " (neither an iallocator nor a target"
+                                   " node can be passed)" %
+                                   instance.disk_template, errors.ECODE_INVAL)
 
     _CheckNodeOnline(self, target_node)
     _CheckNodeNotDrained(self, target_node)
+
+    # Save target_node so that we can use it in BuildHooksEnv
+    self.op.target_node = target_node
+
     if instance.admin_up:
       # check memory requirements on the secondary node
       _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
@@ -5728,7 +5857,7 @@ class LUInstanceFailover(LogicalUnit):
     primary_node = self.cfg.GetNodeInfo(instance.primary_node)
 
     source_node = instance.primary_node
-    target_node = instance.secondary_nodes[0]
+    target_node = self.op.target_node
 
     if instance.admin_up:
       feedback_fn("* checking disk consistency between source and target")
@@ -5787,6 +5916,35 @@
       raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                (instance.name, target_node, msg))
 
+  def _RunAllocator(self):
+    """Run the allocator based on input opcode.
+
+    """
+    ial = IAllocator(self.cfg, self.rpc,
+                     mode=constants.IALLOCATOR_MODE_RELOC,
+                     name=self.instance.name,
+                     # TODO See why hail breaks with a single node below
+                     relocate_from=[self.instance.primary_node,
+                                    self.instance.primary_node],
+                     )
+
+    ial.Run(self.op.iallocator)
+
+    if not ial.success:
+      raise errors.OpPrereqError("Can't compute nodes using"
+                                 " iallocator '%s': %s" %
+                                 (self.op.iallocator, ial.info),
+                                 errors.ECODE_NORES)
+    if len(ial.result) != ial.required_nodes:
+      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
+                                 " of nodes (%s), required %s" %
+                                 (self.op.iallocator, len(ial.result),
+                                  ial.required_nodes), errors.ECODE_FAULT)
+    self.op.target_node = ial.result[0]
+    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
+                 self.instance.name, self.op.iallocator,
+                 utils.CommaJoin(ial.result))
+
 
 class LUInstanceMigrate(LogicalUnit):
   """Migrate an instance.
@@ -5802,16 +5960,29 @@
   def ExpandNames(self):
     self._ExpandAndLockInstance()
 
+    if self.op.target_node is not None:
+      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+
     self.needed_locks[locking.LEVEL_NODE] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
     self._migrater = TLMigrateInstance(self, self.op.instance_name,
-                                       self.op.cleanup)
+                                       self.op.cleanup, self.op.iallocator,
+                                       self.op.target_node)
     self.tasklets = [self._migrater]
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
+      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
+      if instance.disk_template in constants.DTS_EXT_MIRROR:
+        if self.op.target_node is None:
+          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+        else:
+          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
+                                                   self.op.target_node]
+        del self.recalculate_locks[locking.LEVEL_NODE]
+      else:
+        self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
     """Build hooks env.
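With the CheckPrereq and _RunAllocator changes above, an externally mirrored (e.g. shared-file) instance can be failed over to a node picked either explicitly or by an iallocator, while DRBD instances still fail over to their secondary. A hedged sketch of the two mutually exclusive call styles at the opcode level, using the parameter names introduced in this patch:

# Either name the target node explicitly...
op = opcodes.OpInstanceFailover(instance_name="inst1.example.com",
                                target_node="node2.example.com")
# ...or let an iallocator choose it; passing both is rejected by
# _CheckIAllocatorOrNode in CheckPrereq.
op = opcodes.OpInstanceFailover(instance_name="inst1.example.com",
                                iallocator="hail")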
@@ -5821,16 +5992,21 @@
     """
     instance = self._migrater.instance
     source_node = instance.primary_node
-    target_node = instance.secondary_nodes[0]
+    target_node = self._migrater.target_node
     env = _BuildInstanceHookEnvByObject(self, instance)
     env["MIGRATE_LIVE"] = self._migrater.live
     env["MIGRATE_CLEANUP"] = self.op.cleanup
     env.update({
       "OLD_PRIMARY": source_node,
-      "OLD_SECONDARY": target_node,
       "NEW_PRIMARY": target_node,
-      "NEW_SECONDARY": source_node,
       })
+
+    if instance.disk_template in constants.DTS_INT_MIRROR:
+      env["OLD_SECONDARY"] = target_node
+      env["NEW_SECONDARY"] = source_node
+    else:
+      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
+
     nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
     nl_post = list(nl)
     nl_post.append(source_node)
@@ -6020,32 +6196,46 @@ class LUNodeMigrate(LogicalUnit):
   HTYPE = constants.HTYPE_NODE
   REQ_BGL = False
 
+  def CheckArguments(self):
+    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+
   def ExpandNames(self):
     self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
 
-    self.needed_locks = {
-      locking.LEVEL_NODE: [self.op.node_name],
-      }
-
-    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+    self.needed_locks = {}
 
     # Create tasklets for migrating instances for all instances on this node
     names = []
     tasklets = []
 
+    self.lock_all_nodes = False
+
     for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
       logging.debug("Migrating instance %s", inst.name)
       names.append(inst.name)
 
-      tasklets.append(TLMigrateInstance(self, inst.name, False))
+      tasklets.append(TLMigrateInstance(self, inst.name, False,
+                                        self.op.iallocator, None))
+
+      if inst.disk_template in constants.DTS_EXT_MIRROR:
+        # We need to lock all nodes, as the iallocator will choose the
+        # destination nodes afterwards
+        self.lock_all_nodes = True
 
     self.tasklets = tasklets
 
+    # Declare node locks
+    if self.lock_all_nodes:
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    else:
+      self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+
     # Declare instance locks
     self.needed_locks[locking.LEVEL_INSTANCE] = names
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE:
+    if level == locking.LEVEL_NODE and not self.lock_all_nodes:
       self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
@@ -6071,7 +6261,8 @@ class TLMigrateInstance(Tasklet):
       this variable is initalized only after CheckPrereq has run
 
   """
-  def __init__(self, lu, instance_name, cleanup):
+  def __init__(self, lu, instance_name, cleanup,
+               iallocator=None, target_node=None):
     """Initializes this class.
 
     """
@@ -6081,6 +6272,8 @@ class TLMigrateInstance(Tasklet):
     self.instance_name = instance_name
     self.cleanup = cleanup
     self.live = False # will be overridden later
+    self.iallocator = iallocator
+    self.target_node = target_node
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -6091,19 +6284,48 @@ class TLMigrateInstance(Tasklet):
     instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
     instance = self.cfg.GetInstanceInfo(instance_name)
     assert instance is not None
+    self.instance = instance
 
-    if instance.disk_template != constants.DT_DRBD8:
-      raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
+    if instance.disk_template not in constants.DTS_MIRRORED:
+      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
+                                 " migrations" % instance.disk_template,
+                                 errors.ECODE_STATE)
+
+    if instance.disk_template in constants.DTS_EXT_MIRROR:
+      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
+
+      if self.iallocator:
+        self._RunAllocator()
+
+      # self.target_node is already populated, either directly or by the
+      # iallocator run
+      target_node = self.target_node
 
-    secondary_nodes = instance.secondary_nodes
-    if not secondary_nodes:
-      raise errors.ConfigurationError("No secondary node but using"
-                                      " drbd8 disk template")
+      if len(self.lu.tasklets) == 1:
+        # It is safe to remove locks only when we're the only tasklet in the LU
+        nodes_keep = [instance.primary_node, self.target_node]
+        nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE]
+                     if node not in nodes_keep]
+        self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel)
+        self.lu.acquired_locks[locking.LEVEL_NODE] = nodes_keep
+
+    else:
+      secondary_nodes = instance.secondary_nodes
+      if not secondary_nodes:
+        raise errors.ConfigurationError("No secondary node but using"
+                                        " %s disk template" %
+                                        instance.disk_template)
+      target_node = secondary_nodes[0]
+      if self.lu.op.iallocator or (self.lu.op.target_node and
+                                   self.lu.op.target_node != target_node):
+        raise errors.OpPrereqError("Instances with disk template %s cannot"
+                                   " be migrated over to arbitrary nodes"
+                                   " (neither an iallocator nor a target"
+                                   " node can be passed)" %
+                                   instance.disk_template, errors.ECODE_INVAL)
 
     i_be = self.cfg.GetClusterInfo().FillBE(instance)
-    target_node = secondary_nodes[0]
+
     # check memory requirements on the secondary node
     _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                          instance.name, i_be[constants.BE_MEMORY],
@@ -6119,7 +6341,35 @@ class TLMigrateInstance(Tasklet):
     result.Raise("Can't migrate, please use failover", prereq=True,
                  ecode=errors.ECODE_STATE)
 
-    self.instance = instance
+
+  def _RunAllocator(self):
+    """Run the allocator based on input opcode.
+ + """ + ial = IAllocator(self.cfg, self.rpc, + mode=constants.IALLOCATOR_MODE_RELOC, + name=self.instance_name, + # TODO See why hail breaks with a single node below + relocate_from=[self.instance.primary_node, + self.instance.primary_node], + ) + + ial.Run(self.iallocator) + + if not ial.success: + raise errors.OpPrereqError("Can't compute nodes using" + " iallocator '%s': %s" % + (self.iallocator, ial.info), + errors.ECODE_NORES) + if len(ial.result) != ial.required_nodes: + raise errors.OpPrereqError("iallocator '%s' returned invalid number" + " of nodes (%s), required %s" % + (self.iallocator, len(ial.result), + ial.required_nodes), errors.ECODE_FAULT) + self.target_node = ial.result[0] + self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s", + self.instance_name, self.iallocator, + utils.CommaJoin(ial.result)) if self.lu.op.live is not None and self.lu.op.mode is not None: raise errors.OpPrereqError("Only one of the 'live' and 'mode'" @@ -6135,7 +6385,7 @@ class TLMigrateInstance(Tasklet): self.lu.op.live = None elif self.lu.op.mode is None: # read the default value from the hypervisor - i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False) + i_hv = self.cfg.GetClusterInfo().FillHV(self.instance, skip_globals=False) self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE] self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE @@ -6255,16 +6505,17 @@ class TLMigrateInstance(Tasklet): " primary node (%s)" % source_node) demoted_node = target_node - self._EnsureSecondary(demoted_node) - try: + if instance.disk_template in constants.DTS_INT_MIRROR: + self._EnsureSecondary(demoted_node) + try: + self._WaitUntilSync() + except errors.OpExecError: + # we ignore here errors, since if the device is standalone, it + # won't be able to sync + pass + self._GoStandalone() + self._GoReconnect(False) self._WaitUntilSync() - except errors.OpExecError: - # we ignore here errors, since if the device is standalone, it - # won't be able to sync - pass - self._GoStandalone() - self._GoReconnect(False) - self._WaitUntilSync() self.feedback_fn("* done") @@ -6273,6 +6524,9 @@ class TLMigrateInstance(Tasklet): """ target_node = self.target_node + if self.instance.disk_template in constants.DTS_EXT_MIRROR: + return + try: self._EnsureSecondary(target_node) self._GoStandalone() @@ -6337,11 +6591,12 @@ class TLMigrateInstance(Tasklet): self.migration_info = migration_info = result.payload - # Then switch the disks to master/master mode - self._EnsureSecondary(target_node) - self._GoStandalone() - self._GoReconnect(True) - self._WaitUntilSync() + if self.instance.disk_template not in constants.DTS_EXT_MIRROR: + # Then switch the disks to master/master mode + self._EnsureSecondary(target_node) + self._GoStandalone() + self._GoReconnect(True) + self._WaitUntilSync() self.feedback_fn("* preparing %s to accept the instance" % target_node) result = self.rpc.call_accept_instance(target_node, @@ -6390,11 +6645,12 @@ class TLMigrateInstance(Tasklet): raise errors.OpExecError("Could not finalize instance migration: %s" % msg) - self._EnsureSecondary(source_node) - self._WaitUntilSync() - self._GoStandalone() - self._GoReconnect(False) - self._WaitUntilSync() + if self.instance.disk_template not in constants.DTS_EXT_MIRROR: + self._EnsureSecondary(source_node) + self._WaitUntilSync() + self._GoStandalone() + self._GoReconnect(False) + self._WaitUntilSync() self.feedback_fn("* done") @@ -6407,7 +6663,13 @@ class TLMigrateInstance(Tasklet): self.feedback_fn = feedback_fn self.source_node = 
     self.source_node = self.instance.primary_node
-    self.target_node = self.instance.secondary_nodes[0]
+
+    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
+    if self.instance.disk_template in constants.DTS_INT_MIRROR:
+      self.target_node = self.instance.secondary_nodes[0]
+    # Otherwise self.target_node has been populated either
+    # directly, or through an iallocator.
+
     self.all_nodes = [self.source_node, self.target_node]
     self.nodes_ip = {
       self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
@@ -6591,6 +6853,34 @@ def _GenerateDiskTemplate(lu, template_name,
                                                          disk_index)),
                               mode=disk["mode"])
       disks.append(disk_dev)
+  elif template_name == constants.DT_SHARED_FILE:
+    if len(secondary_nodes) != 0:
+      raise errors.ProgrammerError("Wrong template configuration")
+
+    opcodes.RequireSharedFileStorage()
+
+    for idx, disk in enumerate(disk_info):
+      disk_index = idx + base_index
+      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
+                              iv_name="disk/%d" % disk_index,
+                              logical_id=(file_driver,
+                                          "%s/disk%d" % (file_storage_dir,
+                                                         disk_index)),
+                              mode=disk["mode"])
+      disks.append(disk_dev)
+  elif template_name == constants.DT_BLOCK:
+    if len(secondary_nodes) != 0:
+      raise errors.ProgrammerError("Wrong template configuration")
+
+    for idx, disk in enumerate(disk_info):
+      disk_index = idx + base_index
+      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
+                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
+                                          disk["adopt"]),
+                              iv_name="disk/%d" % disk_index,
+                              mode=disk["mode"])
+      disks.append(disk_dev)
+
   else:
     raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
 
   return disks
@@ -6627,6 +6917,10 @@ def _WipeDisks(lu, instance):
 
   """
   node = instance.primary_node
+
+  for device in instance.disks:
+    lu.cfg.SetDiskID(device, node)
+
   logging.info("Pause sync of instance %s disks", instance.name)
   result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
 
@@ -6638,7 +6932,8 @@ def _WipeDisks(lu, instance):
   try:
     for idx, device in enumerate(instance.disks):
       lu.LogInfo("* Wiping disk %d", idx)
-      logging.info("Wiping disk %d for instance %s", idx, instance.name)
+      logging.info("Wiping disk %d for instance %s, node %s",
+                   idx, instance.name, node)
 
       # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
       # MAX_WIPE_CHUNK at max
@@ -6700,7 +6995,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     pnode = target_node
     all_nodes = [pnode]
 
-  if instance.disk_template == constants.DT_FILE:
+  if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
 
@@ -6790,6 +7085,7 @@ def _ComputeDiskSizePerVG(disk_template, disks):
     # 128 MB are added for drbd metadata for each disk
     constants.DT_DRBD8: _compute(disks, 128),
     constants.DT_FILE: {},
+    constants.DT_SHARED_FILE: {},
   }
 
   if disk_template not in req_size_dict:
@@ -6810,6 +7106,8 @@ def _ComputeDiskSize(disk_template, disks):
     # 128 MB are added for drbd metadata for each disk
     constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
+    constants.DT_SHARED_FILE: 0,
+    constants.DT_BLOCK: 0,
  }
 
   if disk_template not in req_size_dict:
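The _ComputeDiskSize table above charges DRBD disks 128 MB of metadata each, while the new shared-file and block templates request no volume-group space at all. A small worked example, assuming two 10 GiB disks (sizes in MiB, as elsewhere in cmdlib):

disks = [{"size": 10240}, {"size": 10240}]

# DT_DRBD8: each disk plus 128 MiB of metadata -> (10240 + 128) * 2 = 20736
drbd_size = sum(d["size"] + 128 for d in disks)

# DT_SHARED_FILE / DT_BLOCK: nothing is requested from the volume group,
# matching the 0 entries added to req_size_dict above.
shared_file_size = block_size = 0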
+ raise errors.OpPrereqError("Disk template %s requires disk adoption," + " but no 'adopt' parameter given" % + self.op.disk_template, + errors.ECODE_INVAL) self.adopt_disks = has_adopt @@ -6971,7 +7275,7 @@ class LUInstanceCreate(LogicalUnit): _CheckIAllocatorOrNode(self, "iallocator", "pnode") if self.op.pnode is not None: - if self.op.disk_template in constants.DTS_NET_MIRROR: + if self.op.disk_template in constants.DTS_INT_MIRROR: if self.op.snode is None: raise errors.OpPrereqError("The networked disk templates need" " a mirror node", errors.ECODE_INVAL) @@ -7406,18 +7710,8 @@ class LUInstanceCreate(LogicalUnit): " in cluster" % mac, errors.ECODE_NOTUNIQUE) - # bridge verification - bridge = nic.get("bridge", None) - link = nic.get("link", None) - if bridge and link: - raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'" - " at the same time", errors.ECODE_INVAL) - elif bridge and nic_mode == constants.NIC_MODE_ROUTED: - raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic", - errors.ECODE_INVAL) - elif bridge: - link = bridge - + # Build nic parameters + link = nic.get(constants.INIC_LINK, None) nicparams = {} if nic_mode_req: nicparams[constants.NIC_MODE] = nic_mode_req @@ -7531,7 +7825,7 @@ class LUInstanceCreate(LogicalUnit): self.secondaries = [] # mirror node verification - if self.op.disk_template in constants.DTS_NET_MIRROR: + if self.op.disk_template in constants.DTS_INT_MIRROR: if self.op.snode == pnode.name: raise errors.OpPrereqError("The secondary node cannot be the" " primary node.", errors.ECODE_INVAL) @@ -7547,7 +7841,7 @@ class LUInstanceCreate(LogicalUnit): req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks) _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes) - else: # instead, we must check the adoption data + elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks]) if len(all_lvs) != len(self.disks): raise errors.OpPrereqError("Duplicate volume names given for adoption", @@ -7583,6 +7877,34 @@ class LUInstanceCreate(LogicalUnit): for dsk in self.disks: dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0])) + elif self.op.disk_template == constants.DT_BLOCK: + # Normalize and de-duplicate device paths + all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks]) + if len(all_disks) != len(self.disks): + raise errors.OpPrereqError("Duplicate disk names given for adoption", + errors.ECODE_INVAL) + baddisks = [d for d in all_disks + if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)] + if baddisks: + raise errors.OpPrereqError("Device node(s) %s lie outside %s and" + " cannot be adopted" % + (", ".join(baddisks), + constants.ADOPTABLE_BLOCKDEV_ROOT), + errors.ECODE_INVAL) + + node_disks = self.rpc.call_bdev_sizes([pnode.name], + list(all_disks))[pnode.name] + node_disks.Raise("Cannot get block device information from node %s" % + pnode.name) + node_disks = node_disks.payload + delta = all_disks.difference(node_disks.keys()) + if delta: + raise errors.OpPrereqError("Missing block device(s): %s" % + utils.CommaJoin(delta), + errors.ECODE_INVAL) + for dsk in self.disks: + dsk["size"] = int(float(node_disks[dsk["adopt"]])) + _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams) _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant) @@ -7613,7 +7935,7 @@ class LUInstanceCreate(LogicalUnit): else: network_port = None - if constants.ENABLE_FILE_STORAGE: + if constants.ENABLE_FILE_STORAGE or 
+    if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
       # this is needed because os.path.join does not accept None arguments
       if self.op.file_storage_dir is None:
         string_file_storage_dir = ""
       else:
         string_file_storage_dir = self.op.file_storage_dir
 
       # build the full file storage dir path
-      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
+      if self.op.disk_template == constants.DT_SHARED_FILE:
+        get_fsd_fn = self.cfg.GetSharedFileStorageDir
+      else:
+        get_fsd_fn = self.cfg.GetFileStorageDir
+
+      file_storage_dir = utils.PathJoin(get_fsd_fn(),
                                         string_file_storage_dir, instance)
     else:
       file_storage_dir = ""
@@ -7649,17 +7976,18 @@ class LUInstanceCreate(LogicalUnit):
                             )
 
     if self.adopt_disks:
-      # rename LVs to the newly-generated names; we need to construct
-      # 'fake' LV disks with the old data, plus the new unique_id
-      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
-      rename_to = []
-      for t_dsk, a_dsk in zip (tmp_disks, self.disks):
-        rename_to.append(t_dsk.logical_id)
-        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
-        self.cfg.SetDiskID(t_dsk, pnode_name)
-      result = self.rpc.call_blockdev_rename(pnode_name,
-                                             zip(tmp_disks, rename_to))
-      result.Raise("Failed to rename adoped LVs")
+      if self.op.disk_template == constants.DT_PLAIN:
+        # rename LVs to the newly-generated names; we need to construct
+        # 'fake' LV disks with the old data, plus the new unique_id
+        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
+        rename_to = []
+        for t_dsk, a_dsk in zip (tmp_disks, self.disks):
+          rename_to.append(t_dsk.logical_id)
+          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
+          self.cfg.SetDiskID(t_dsk, pnode_name)
+        result = self.rpc.call_blockdev_rename(pnode_name,
+                                               zip(tmp_disks, rename_to))
+        result.Raise("Failed to rename adoped LVs")
     else:
       feedback_fn("* creating instance disks...")
       try:
@@ -7704,7 +8032,7 @@ class LUInstanceCreate(LogicalUnit):
 
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self, iobj)
-    elif iobj.disk_template in constants.DTS_NET_MIRROR:
+    elif iobj.disk_template in constants.DTS_INT_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
@@ -7839,9 +8167,9 @@ class LUInstanceConsole(NoHooksLU):
 
     if instance.name not in node_insts.payload:
       if instance.admin_up:
-        state = "ERROR_down"
+        state = constants.INSTST_ERRORDOWN
      else:
-        state = "ADMIN_down"
+        state = constants.INSTST_ADMINDOWN
       raise errors.OpExecError("Instance %s is not running (state %s)" %
                                (instance.name, state))
 
@@ -8026,6 +8354,30 @@ class TLReplaceDisks(Tasklet):
     return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                     node_name, True)
 
+  def _CheckDisksActivated(self, instance):
+    """Checks if the instance disks are activated.
+
+    @param instance: The instance to check disks
+    @return: True if they are activated, False otherwise
+
+    """
+    nodes = instance.all_nodes
+
+    for idx, dev in enumerate(instance.disks):
+      for node in nodes:
+        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
+        self.cfg.SetDiskID(dev, node)
+
+        result = self.rpc.call_blockdev_find(node, dev)
+
+        if result.offline:
+          continue
+        elif result.fail_msg or not result.payload:
+          return False
+
+    return True
+
+
   def CheckPrereq(self):
     """Check prerequisites.
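The DT_BLOCK adoption hunk above only accepts device paths that resolve under constants.ADOPTABLE_BLOCKDEV_ROOT. A small sketch of that check in isolation; the root path shown is an assumption (see constants.py for the real value):

import os.path

ADOPTABLE_BLOCKDEV_ROOT = "/dev/disk/"  # assumed default

def _FindUnadoptableDisks(paths):
  """Return device paths that may not be adopted as DT_BLOCK disks."""
  all_disks = set(os.path.abspath(p) for p in paths)
  return [d for d in all_disks
          if not d.startswith(ADOPTABLE_BLOCKDEV_ROOT)]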
@@ -8089,6 +8441,10 @@ class TLReplaceDisks(Tasklet):
                                  errors.ECODE_INVAL)
 
     if self.mode == constants.REPLACE_DISK_AUTO:
+      if not self._CheckDisksActivated(instance):
+        raise errors.OpPrereqError("Please run activate-disks on instance %s"
+                                   " first" % self.instance_name,
+                                   errors.ECODE_STATE)
       faulty_primary = self._FindFaultyDisks(instance.primary_node)
       faulty_secondary = self._FindFaultyDisks(secondary_node)
 
@@ -8767,9 +9123,10 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     self.disk = instance.FindDisk(self.op.disk)
 
-    if instance.disk_template != constants.DT_FILE:
-      # TODO: check the free disk space for file, when that feature
-      # will be supported
+    if instance.disk_template not in (constants.DT_FILE,
+                                      constants.DT_SHARED_FILE):
+      # TODO: check the free disk space for file, when that feature will be
+      # supported
       _CheckNodesFreeDiskPerVG(self, nodenames,
                                self.disk.ComputeGrowth(self.op.amount))
 
@@ -9028,7 +9385,7 @@ class LUInstanceSetParams(LogicalUnit):
                                  errors.ECODE_INVAL)
 
     if (self.op.disk_template and
-        self.op.disk_template in constants.DTS_NET_MIRROR and
+        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
       raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                  " one requires specifying a secondary node",
@@ -9188,7 +9545,7 @@ class LUInstanceSetParams(LogicalUnit):
                                    self.op.disk_template),
                                   errors.ECODE_INVAL)
       _CheckInstanceDown(self, instance, "cannot change disk template")
-      if self.op.disk_template in constants.DTS_NET_MIRROR:
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
         if self.op.remote_node == pnode:
           raise errors.OpPrereqError("Given new secondary node %s is the same"
                                      " as the primary node of the instance" %
@@ -9510,7 +9867,8 @@ class LUInstanceSetParams(LogicalUnit):
         result.append(("disk/%d" % device_idx, "remove"))
       elif disk_op == constants.DDM_ADD:
         # add a new disk
-        if instance.disk_template == constants.DT_FILE:
+        if instance.disk_template in (constants.DT_FILE,
+                                      constants.DT_SHARED_FILE):
           file_driver, file_path = instance.disks[0].logical_id
           file_path = os.path.dirname(file_path)
         else:
@@ -10176,7 +10534,7 @@ class LUGroupAssignNodes(NoHooksLU):
     In particular, it returns information about newly split instances, and
     instances that were already split, and remain so after the change.
 
-    Only instances whose disk template is listed in constants.DTS_NET_MIRROR are
+    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
     considered.
 
     @type changes: list of (node_name, new_group_uuid) pairs.
@@ -10199,7 +10557,7 @@ class LUGroupAssignNodes(NoHooksLU):
       return [instance.primary_node] + list(instance.secondary_nodes)
 
     for inst in instance_data.values():
-      if inst.disk_template not in constants.DTS_NET_MIRROR:
+      if inst.disk_template not in constants.DTS_INT_MIRROR:
         continue
 
       instance_nodes = InstanceNodes(inst)
@@ -10216,7 +10574,6 @@ class LUGroupAssignNodes(NoHooksLU):
 
 
 class _GroupQuery(_QueryBase):
-
   FIELDS = query.GROUP_FIELDS
 
   def ExpandNames(self, lu):
@@ -10299,7 +10656,8 @@ class LUGroupQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
+    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                          self.op.output_fields, False)
 
   def ExpandNames(self):
     self.gq.ExpandNames(self)
@@ -10447,7 +10805,7 @@ class LUGroupRename(LogicalUnit):
 
   def ExpandNames(self):
     # This raises errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)
+    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: [self.group_uuid],
@@ -10456,8 +10814,7 @@ class LUGroupRename(LogicalUnit):
   def CheckPrereq(self):
     """Check prerequisites.
 
-    This checks that the given old_name exists as a node group, and that
-    new_name doesn't.
+    Ensures requested new name is not yet used.
 
     """
     try:
@@ -10475,7 +10832,7 @@ class LUGroupRename(LogicalUnit):
 
     """
     env = {
-      "OLD_NAME": self.op.old_name,
+      "OLD_NAME": self.op.group_name,
       "NEW_NAME": self.op.new_name,
       }
 
@@ -10498,7 +10855,7 @@ class LUGroupRename(LogicalUnit):
 
     if group is None:
       raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
-                               (self.op.old_name, self.group_uuid))
+                               (self.op.group_name, self.group_uuid))
 
     group.name = self.op.new_name
     self.cfg.Update(group, feedback_fn)
@@ -11055,8 +11412,7 @@ class IAllocator(object):
         "i_pri_up_memory": i_p_up_mem,
         }
       pnr_dyn.update(node_results[nname])
-
-      node_results[nname] = pnr_dyn
+      node_results[nname] = pnr_dyn
 
     return node_results
 
@@ -11108,7 +11464,7 @@ class IAllocator(object):
     """
     disk_space = _ComputeDiskSize(self.disk_template, self.disks)
 
-    if self.disk_template in constants.DTS_NET_MIRROR:
+    if self.disk_template in constants.DTS_INT_MIRROR:
       self.required_nodes = 2
     else:
       self.required_nodes = 1
@@ -11141,11 +11497,12 @@ class IAllocator(object):
       raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                    " IAllocator" % self.name)
 
-    if instance.disk_template not in constants.DTS_NET_MIRROR:
+    if instance.disk_template not in constants.DTS_MIRRORED:
       raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                  errors.ECODE_INVAL)
 
-    if len(instance.secondary_nodes) != 1:
+    if instance.disk_template in constants.DTS_INT_MIRROR and \
+        len(instance.secondary_nodes) != 1:
       raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                  errors.ECODE_STATE)
 
@@ -11330,13 +11687,16 @@ _QUERY_IMPL = {
   constants.QR_INSTANCE: _InstanceQuery,
   constants.QR_NODE: _NodeQuery,
   constants.QR_GROUP: _GroupQuery,
+  constants.QR_OS: _OsQuery,
   }
 
+assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
+
 
 def _GetQueryImplementation(name):
   """Returns the implemtnation for a query type.
 
-  @param name: Query type, must be one of L{constants.QR_OP_QUERY}
+  @param name: Query type, must be one of L{constants.QR_VIA_OP}
 
   """
   try:
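The _QUERY_IMPL table now covers every resource in constants.QR_VIA_OP, including the new QR_OS entry backed by _OsQuery. A rough usage sketch of how such a query reaches this dispatch table through OP_QUERY (field names are illustrative; the full OS field set is defined in lib/query.py):

# Hypothetical client-side usage: query operating systems via OP_QUERY.
op = opcodes.OpQuery(what=constants.QR_OS,
                     fields=["name", "valid", "hidden"],
                     filter=None)
# LUQuery.CheckArguments then calls _GetQueryImplementation(constants.QR_OS)
# and instantiates the _OsQuery class added earlier in this patch.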