X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/9c24736cae9056e7e38bf2f92eea496225d8d158..aac4511abf7941d7494df99ea2ac9204806a2f9f:/lib/cmdlib.py

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 75f0ac4..f2be110 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -453,15 +453,19 @@ class _QueryBase:
   #: Attribute holding field definitions
   FIELDS = None
 
-  def __init__(self, names, fields, use_locking):
+  def __init__(self, filter_, fields, use_locking):
     """Initializes this class.
 
     """
-    self.names = names
     self.use_locking = use_locking
 
-    self.query = query.Query(self.FIELDS, fields)
+    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
+                             namefield="name")
     self.requested_data = self.query.RequestedData()
+    self.names = self.query.RequestedNames()
+
+    # Sort only if no names were requested
+    self.sort_by_name = not self.names
 
     self.do_locking = None
     self.wanted = None
@@ -529,13 +533,15 @@ class _QueryBase:
     """Collect data and execute query.
 
     """
-    return query.GetQueryResponse(self.query, self._GetQueryData(lu))
+    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
+                                  sort_by_name=self.sort_by_name)
 
   def OldStyleQuery(self, lu):
     """Collect data and execute query.
 
     """
-    return self.query.OldStyleQuery(self._GetQueryData(lu))
+    return self.query.OldStyleQuery(self._GetQueryData(lu),
+                                    sort_by_name=self.sort_by_name)
 
 
 def _GetWantedNodes(lu, nodes):
@@ -1035,7 +1041,7 @@ def _GetStorageTypeArgs(cfg, storage_type):
   # Special case for file storage
   if storage_type == constants.ST_FILE:
     # storage.FileStorage wants a list of storage directories
-    return [[cfg.GetFileStorageDir()]]
+    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
 
   return []
 
@@ -1254,8 +1260,8 @@ class LUClusterVerify(LogicalUnit):
     @ivar instances: a list of running instances (runtime)
     @ivar pinst: list of configured primary instances (config)
     @ivar sinst: list of configured secondary instances (config)
-    @ivar sbp: diction of {secondary-node: list of instances} of all peers
-        of this node (config)
+    @ivar sbp: dictionary of {primary-node: list of instances} for all
+        instances for which this node is secondary (config)
     @ivar mfree: free memory, as reported by hypervisor (runtime)
     @ivar dfree: free disk, as reported by the node (runtime)
     @ivar offline: the offline status (config)
@@ -1617,6 +1623,7 @@ class LUClusterVerify(LogicalUnit):
     instances it was primary for.
""" + cluster_info = self.cfg.GetClusterInfo() for node, n_img in node_image.items(): # This code checks that every node which is now listed as # secondary has enough memory to host all instances it is @@ -1635,7 +1642,7 @@ class LUClusterVerify(LogicalUnit): for prinode, instances in n_img.sbp.items(): needed_mem = 0 for instance in instances: - bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance]) + bep = cluster_info.FillBE(instance_cfg[instance]) if bep[constants.BE_AUTO_BALANCE]: needed_mem += bep[constants.BE_MEMORY] test = n_img.mfree < needed_mem @@ -2584,16 +2591,18 @@ class LUClusterRepairDiskSizes(NoHooksLU): newl = [v[2].Copy() for v in dskl] for dsk in newl: self.cfg.SetDiskID(dsk, node) - result = self.rpc.call_blockdev_getsizes(node, newl) + result = self.rpc.call_blockdev_getsize(node, newl) if result.fail_msg: - self.LogWarning("Failure in blockdev_getsizes call to node" + self.LogWarning("Failure in blockdev_getsize call to node" " %s, ignoring", node) continue - if len(result.data) != len(dskl): + if len(result.payload) != len(dskl): + logging.warning("Invalid result from node %s: len(dksl)=%d," + " result.payload=%s", node, len(dskl), result.payload) self.LogWarning("Invalid result from node %s, ignoring node results", node) continue - for ((instance, idx, disk), size) in zip(dskl, result.data): + for ((instance, idx, disk), size) in zip(dskl, result.payload): if size is None: self.LogWarning("Disk %d of instance %s did not return size" " information, ignoring", idx, instance.name) @@ -3225,6 +3234,7 @@ class LUOobCommand(NoHooksLU): """ REG_BGL = False + _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE) def CheckPrereq(self): """Check prerequisites. @@ -3237,6 +3247,34 @@ class LUOobCommand(NoHooksLU): """ self.nodes = [] + self.master_node = self.cfg.GetMasterNode() + + if self.op.node_names: + if self.op.command in self._SKIP_MASTER: + if self.master_node in self.op.node_names: + master_node_obj = self.cfg.GetNodeInfo(self.master_node) + master_oob_handler = _SupportsOob(self.cfg, master_node_obj) + + if master_oob_handler: + additional_text = ("Run '%s %s %s' if you want to operate on the" + " master regardless") % (master_oob_handler, + self.op.command, + self.master_node) + else: + additional_text = "The master node does not support out-of-band" + + raise errors.OpPrereqError(("Operating on the master node %s is not" + " allowed for %s\n%s") % + (self.master_node, self.op.command, + additional_text), errors.ECODE_INVAL) + else: + self.op.node_names = self.cfg.GetNodeList() + if self.op.command in self._SKIP_MASTER: + self.op.node_names.remove(self.master_node) + + if self.op.command in self._SKIP_MASTER: + assert self.master_node not in self.op.node_names + for node_name in self.op.node_names: node = self.cfg.GetNodeInfo(node_name) @@ -3246,7 +3284,8 @@ class LUOobCommand(NoHooksLU): else: self.nodes.append(node) - if (self.op.command == constants.OOB_POWER_OFF and not node.offline): + if (not self.op.ignore_status and + (self.op.command == constants.OOB_POWER_OFF and not node.offline)): raise errors.OpPrereqError(("Cannot power off node %s because it is" " not marked offline") % node_name, errors.ECODE_STATE) @@ -3258,18 +3297,19 @@ class LUOobCommand(NoHooksLU): if self.op.node_names: self.op.node_names = [_ExpandNodeName(self.cfg, name) for name in self.op.node_names] + lock_names = self.op.node_names else: - self.op.node_names = self.cfg.GetNodeList() + lock_names = locking.ALL_SET self.needed_locks = { - locking.LEVEL_NODE: 
+      locking.LEVEL_NODE: lock_names,
       }
 
   def Exec(self, feedback_fn):
     """Execute OOB and return result if we expect any.
 
     """
-    master_node = self.cfg.GetMasterNode()
+    master_node = self.master_node
     ret = []
 
     for node in self.nodes:
@@ -3682,8 +3722,8 @@ class LUNodeQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.nq = _NodeQuery(self.op.names, self.op.output_fields,
-                         self.op.use_locking)
+    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                         self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.nq.ExpandNames(self)
@@ -3951,9 +3991,8 @@ class LUQuery(NoHooksLU):
 
   def CheckArguments(self):
     qcls = _GetQueryImplementation(self.op.what)
-    names = qlang.ReadSimpleFilter("name", self.op.filter)
 
-    self.impl = qcls(names, self.op.fields, False)
+    self.impl = qcls(self.op.filter, self.op.fields, False)
 
   def ExpandNames(self):
     self.impl.ExpandNames(self)
@@ -4662,6 +4701,7 @@ class LUClusterQuery(NoHooksLU):
       "volume_group_name": cluster.volume_group_name,
       "drbd_usermode_helper": cluster.drbd_usermode_helper,
       "file_storage_dir": cluster.file_storage_dir,
+      "shared_file_storage_dir": cluster.shared_file_storage_dir,
       "maintain_node_health": cluster.maintain_node_health,
       "ctime": cluster.ctime,
       "mtime": cluster.mtime,
@@ -5198,10 +5238,16 @@ class LUInstanceReboot(LogicalUnit):
     ignore_secondaries = self.op.ignore_secondaries
     reboot_type = self.op.reboot_type
 
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    remote_info.Raise("Error checking node %s" % instance.primary_node)
+    instance_running = bool(remote_info.payload)
+
     node_current = instance.primary_node
 
-    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
-                       constants.INSTANCE_REBOOT_HARD]:
+    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                                            constants.INSTANCE_REBOOT_HARD]:
       for disk in instance.disks:
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
@@ -5209,10 +5255,14 @@ class LUInstanceReboot(LogicalUnit):
                                              self.op.shutdown_timeout)
       result.Raise("Could not reboot instance")
     else:
-      result = self.rpc.call_instance_shutdown(node_current, instance,
-                                               self.op.shutdown_timeout)
-      result.Raise("Could not shutdown instance for full reboot")
-      _ShutdownInstanceDisks(self, instance)
+      if instance_running:
+        result = self.rpc.call_instance_shutdown(node_current, instance,
+                                                 self.op.shutdown_timeout)
+        result.Raise("Could not shutdown instance for full reboot")
+        _ShutdownInstanceDisks(self, instance)
+      else:
+        self.LogInfo("Instance %s was already stopped, starting now",
+                     instance.name)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current, instance, None, None)
       msg = result.fail_msg
@@ -5478,6 +5528,11 @@ class LUInstanceRename(LogicalUnit):
     hostname = netutils.GetHostname(name=new_name)
     self.LogInfo("Resolved given name '%s' to '%s'", new_name, hostname.name)
+    if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
+      raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
+                                  " same as given hostname '%s'") %
+                                 (hostname.name, self.op.new_name),
+                                 errors.ECODE_INVAL)
     new_name = self.op.new_name = hostname.name
     if (self.op.ip_check and
         netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
@@ -5498,7 +5553,7 @@ class LUInstanceRename(LogicalUnit):
     old_name = inst.name
 
     rename_file_storage = False
-    if (inst.disk_template == constants.DT_FILE and
+    if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
         self.op.new_name != inst.name):
       old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
       rename_file_storage = True
@@ -5628,8 +5683,8 @@ class LUInstanceQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
-                             self.op.use_locking)
+    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                             self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.iq.ExpandNames(self)
@@ -5799,19 +5854,35 @@ class LUInstanceMigrate(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   REQ_BGL = False
 
+  def CheckArguments(self):
+    _CheckIAllocatorOrNode(self, "iallocator", "target_node")
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
 
+    if self.op.target_node is not None:
+      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+
     self.needed_locks[locking.LEVEL_NODE] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
     self._migrater = TLMigrateInstance(self, self.op.instance_name,
-                                       self.op.cleanup)
+                                       self.op.cleanup, self.op.iallocator,
+                                       self.op.target_node)
     self.tasklets = [self._migrater]
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
+      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
+      if instance.disk_template in constants.DTS_EXT_MIRROR:
+        if self.op.target_node is None:
+          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+        else:
+          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
+                                                   self.op.target_node]
+        del self.recalculate_locks[locking.LEVEL_NODE]
+      else:
+        self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -5821,16 +5892,21 @@ class LUInstanceMigrate(LogicalUnit):
     """
     instance = self._migrater.instance
     source_node = instance.primary_node
-    target_node = instance.secondary_nodes[0]
+    target_node = self._migrater.target_node
     env = _BuildInstanceHookEnvByObject(self, instance)
     env["MIGRATE_LIVE"] = self._migrater.live
     env["MIGRATE_CLEANUP"] = self.op.cleanup
     env.update({
       "OLD_PRIMARY": source_node,
-      "OLD_SECONDARY": target_node,
       "NEW_PRIMARY": target_node,
-      "NEW_SECONDARY": source_node,
       })
+
+    if instance.disk_template in constants.DTS_NET_MIRROR:
+      env["OLD_SECONDARY"] = target_node
+      env["NEW_SECONDARY"] = source_node
+    else:
+      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
+
     nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
     nl_post = list(nl)
     nl_post.append(source_node)
@@ -6020,32 +6096,46 @@ class LUNodeMigrate(LogicalUnit):
   HTYPE = constants.HTYPE_NODE
   REQ_BGL = False
 
+  def CheckArguments(self):
+    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+
   def ExpandNames(self):
     self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
 
-    self.needed_locks = {
-      locking.LEVEL_NODE: [self.op.node_name],
-      }
-
-    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+    self.needed_locks = {}
 
     # Create tasklets for migrating instances for all instances on this node
     names = []
     tasklets = []
 
+    self.lock_all_nodes = False
+
     for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
       logging.debug("Migrating instance %s", inst.name)
       names.append(inst.name)
 
-      tasklets.append(TLMigrateInstance(self, inst.name, False))
+      tasklets.append(TLMigrateInstance(self, inst.name, False,
+                                        self.op.iallocator, None))
+
+      if inst.disk_template in constants.DTS_EXT_MIRROR:
+        # We need to lock all nodes, as the iallocator will choose the
+        # destination nodes afterwards
+        self.lock_all_nodes = True
 
     self.tasklets = tasklets
 
+    # Declare node locks
+    if self.lock_all_nodes:
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    else:
+      self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+
     # Declare instance locks
     self.needed_locks[locking.LEVEL_INSTANCE] = names
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE:
+    if level == locking.LEVEL_NODE and not self.lock_all_nodes:
       self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
@@ -6071,7 +6161,8 @@ class TLMigrateInstance(Tasklet):
                  this variable is initalized only after CheckPrereq has run
 
   """
-  def __init__(self, lu, instance_name, cleanup):
+  def __init__(self, lu, instance_name, cleanup,
+               iallocator=None, target_node=None):
     """Initializes this class.
 
     """
@@ -6081,6 +6172,8 @@ class TLMigrateInstance(Tasklet):
     self.instance_name = instance_name
     self.cleanup = cleanup
     self.live = False # will be overridden later
+    self.iallocator = iallocator
+    self.target_node = target_node
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -6091,19 +6184,43 @@ class TLMigrateInstance(Tasklet):
     instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
     instance = self.cfg.GetInstanceInfo(instance_name)
     assert instance is not None
+    self.instance = instance
 
-    if instance.disk_template != constants.DT_DRBD8:
-      raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
+    if instance.disk_template not in constants.DTS_MIRRORED:
+      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
+                                 " migrations" % instance.disk_template,
+                                 errors.ECODE_STATE)
 
-    secondary_nodes = instance.secondary_nodes
-    if not secondary_nodes:
-      raise errors.ConfigurationError("No secondary node but using"
-                                      " drbd8 disk template")
+    if instance.disk_template in constants.DTS_EXT_MIRROR:
+      if [self.iallocator, self.target_node].count(None) != 1:
+        raise errors.OpPrereqError("Do not specify both, iallocator and"
+                                   " target node", errors.ECODE_INVAL)
+
+      if self.iallocator:
+        self._RunAllocator()
+
+      # self.target_node is already populated, either directly or by the
+      # iallocator run
+      target_node = self.target_node
+
+      if len(self.lu.tasklets) == 1:
+        # It is safe to remove locks only when we're the only tasklet in the LU
+        nodes_keep = [instance.primary_node, self.target_node]
+        nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE]
+                     if node not in nodes_keep]
+        self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel)
+        self.lu.acquired_locks[locking.LEVEL_NODE] = nodes_keep
+
+    else:
+      secondary_nodes = instance.secondary_nodes
+      if not secondary_nodes:
+        raise errors.ConfigurationError("No secondary node but using"
+                                        " %s disk template" %
+                                        instance.disk_template)
+      target_node = secondary_nodes[0]
 
     i_be = self.cfg.GetClusterInfo().FillBE(instance)
-    target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
     _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                          instance.name, i_be[constants.BE_MEMORY],
@@ -6119,7 +6236,35 @@ class TLMigrateInstance(Tasklet):
       result.Raise("Can't migrate, please use failover",
                    prereq=True, ecode=errors.ECODE_STATE)
 
-    self.instance = instance
+
+  def _RunAllocator(self):
+    """Run the allocator based on input opcode.
+ + """ + ial = IAllocator(self.cfg, self.rpc, + mode=constants.IALLOCATOR_MODE_RELOC, + name=self.instance_name, + # TODO See why hail breaks with a single node below + relocate_from=[self.instance.primary_node, + self.instance.primary_node], + ) + + ial.Run(self.iallocator) + + if not ial.success: + raise errors.OpPrereqError("Can't compute nodes using" + " iallocator '%s': %s" % + (self.iallocator, ial.info), + errors.ECODE_NORES) + if len(ial.result) != ial.required_nodes: + raise errors.OpPrereqError("iallocator '%s' returned invalid number" + " of nodes (%s), required %s" % + (self.iallocator, len(ial.result), + ial.required_nodes), errors.ECODE_FAULT) + self.target_node = ial.result[0] + self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s", + self.instance_name, self.iallocator, + utils.CommaJoin(ial.result)) if self.lu.op.live is not None and self.lu.op.mode is not None: raise errors.OpPrereqError("Only one of the 'live' and 'mode'" @@ -6135,7 +6280,7 @@ class TLMigrateInstance(Tasklet): self.lu.op.live = None elif self.lu.op.mode is None: # read the default value from the hypervisor - i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False) + i_hv = self.cfg.GetClusterInfo().FillHV(self.instance, skip_globals=False) self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE] self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE @@ -6255,16 +6400,17 @@ class TLMigrateInstance(Tasklet): " primary node (%s)" % source_node) demoted_node = target_node - self._EnsureSecondary(demoted_node) - try: + if instance.disk_template in constants.DTS_NET_MIRROR: + self._EnsureSecondary(demoted_node) + try: + self._WaitUntilSync() + except errors.OpExecError: + # we ignore here errors, since if the device is standalone, it + # won't be able to sync + pass + self._GoStandalone() + self._GoReconnect(False) self._WaitUntilSync() - except errors.OpExecError: - # we ignore here errors, since if the device is standalone, it - # won't be able to sync - pass - self._GoStandalone() - self._GoReconnect(False) - self._WaitUntilSync() self.feedback_fn("* done") @@ -6273,6 +6419,9 @@ class TLMigrateInstance(Tasklet): """ target_node = self.target_node + if self.instance.disk_template in constants.DTS_EXT_MIRROR: + return + try: self._EnsureSecondary(target_node) self._GoStandalone() @@ -6337,11 +6486,12 @@ class TLMigrateInstance(Tasklet): self.migration_info = migration_info = result.payload - # Then switch the disks to master/master mode - self._EnsureSecondary(target_node) - self._GoStandalone() - self._GoReconnect(True) - self._WaitUntilSync() + if self.instance.disk_template not in constants.DTS_EXT_MIRROR: + # Then switch the disks to master/master mode + self._EnsureSecondary(target_node) + self._GoStandalone() + self._GoReconnect(True) + self._WaitUntilSync() self.feedback_fn("* preparing %s to accept the instance" % target_node) result = self.rpc.call_accept_instance(target_node, @@ -6390,11 +6540,12 @@ class TLMigrateInstance(Tasklet): raise errors.OpExecError("Could not finalize instance migration: %s" % msg) - self._EnsureSecondary(source_node) - self._WaitUntilSync() - self._GoStandalone() - self._GoReconnect(False) - self._WaitUntilSync() + if self.instance.disk_template not in constants.DTS_EXT_MIRROR: + self._EnsureSecondary(source_node) + self._WaitUntilSync() + self._GoStandalone() + self._GoReconnect(False) + self._WaitUntilSync() self.feedback_fn("* done") @@ -6407,7 +6558,13 @@ class TLMigrateInstance(Tasklet): self.feedback_fn = feedback_fn self.source_node = 
-    self.target_node = self.instance.secondary_nodes[0]
+
+    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
+    if self.instance.disk_template in constants.DTS_NET_MIRROR:
+      self.target_node = self.instance.secondary_nodes[0]
+    # Otherwise self.target_node has been populated either
+    # directly, or through an iallocator.
+
     self.all_nodes = [self.source_node, self.target_node]
     self.nodes_ip = {
       self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
@@ -6591,6 +6748,34 @@ def _GenerateDiskTemplate(lu, template_name,
                                           disk_index)),
                               mode=disk["mode"])
       disks.append(disk_dev)
+  elif template_name == constants.DT_SHARED_FILE:
+    if len(secondary_nodes) != 0:
+      raise errors.ProgrammerError("Wrong template configuration")
+
+    opcodes.RequireSharedFileStorage()
+
+    for idx, disk in enumerate(disk_info):
+      disk_index = idx + base_index
+      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
+                              iv_name="disk/%d" % disk_index,
+                              logical_id=(file_driver,
+                                          "%s/disk%d" % (file_storage_dir,
+                                                         disk_index)),
+                              mode=disk["mode"])
+      disks.append(disk_dev)
+  elif template_name == constants.DT_BLOCK:
+    if len(secondary_nodes) != 0:
+      raise errors.ProgrammerError("Wrong template configuration")
+
+    for idx, disk in enumerate(disk_info):
+      disk_index = idx + base_index
+      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
+                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
+                                          disk["adopt"]),
+                              iv_name="disk/%d" % disk_index,
+                              mode=disk["mode"])
+      disks.append(disk_dev)
+
   else:
     raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
   return disks
@@ -6627,6 +6812,10 @@ def _WipeDisks(lu, instance):
 
   """
   node = instance.primary_node
+
+  for device in instance.disks:
+    lu.cfg.SetDiskID(device, node)
+
   logging.info("Pause sync of instance %s disks", instance.name)
   result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
 
@@ -6638,7 +6827,8 @@ def _WipeDisks(lu, instance):
   try:
     for idx, device in enumerate(instance.disks):
       lu.LogInfo("* Wiping disk %d", idx)
-      logging.info("Wiping disk %d for instance %s", idx, instance.name)
+      logging.info("Wiping disk %d for instance %s, node %s",
+                   idx, instance.name, node)
 
       # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max
@@ -6700,7 +6890,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     pnode = target_node
     all_nodes = [pnode]
 
-  if instance.disk_template == constants.DT_FILE:
+  if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
 
@@ -6790,6 +6980,7 @@ def _ComputeDiskSizePerVG(disk_template, disks):
     # 128 MB are added for drbd metadata for each disk
     constants.DT_DRBD8: _compute(disks, 128),
     constants.DT_FILE: {},
+    constants.DT_SHARED_FILE: {},
   }
 
   if disk_template not in req_size_dict:
@@ -6810,6 +7001,8 @@ def _ComputeDiskSize(disk_template, disks):
     # 128 MB are added for drbd metadata for each disk
     constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
     constants.DT_FILE: None,
+    constants.DT_SHARED_FILE: 0,
+    constants.DT_BLOCK: 0,
   }
 
   if disk_template not in req_size_dict:
@@ -6945,6 +7138,12 @@ class LUInstanceCreate(LogicalUnit):
       if self.op.mode == constants.INSTANCE_IMPORT:
         raise errors.OpPrereqError("Disk adoption not allowed for"
                                    " instance import", errors.ECODE_INVAL)
+    else:
+      if self.op.disk_template in constants.DTS_MUST_ADOPT:
+ raise errors.OpPrereqError("Disk template %s requires disk adoption," + " but no 'adopt' parameter given" % + self.op.disk_template, + errors.ECODE_INVAL) self.adopt_disks = has_adopt @@ -7406,18 +7605,8 @@ class LUInstanceCreate(LogicalUnit): " in cluster" % mac, errors.ECODE_NOTUNIQUE) - # bridge verification - bridge = nic.get("bridge", None) - link = nic.get("link", None) - if bridge and link: - raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'" - " at the same time", errors.ECODE_INVAL) - elif bridge and nic_mode == constants.NIC_MODE_ROUTED: - raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic", - errors.ECODE_INVAL) - elif bridge: - link = bridge - + # Build nic parameters + link = nic.get(constants.INIC_LINK, None) nicparams = {} if nic_mode_req: nicparams[constants.NIC_MODE] = nic_mode_req @@ -7547,7 +7736,7 @@ class LUInstanceCreate(LogicalUnit): req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks) _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes) - else: # instead, we must check the adoption data + elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks]) if len(all_lvs) != len(self.disks): raise errors.OpPrereqError("Duplicate volume names given for adoption", @@ -7583,6 +7772,34 @@ class LUInstanceCreate(LogicalUnit): for dsk in self.disks: dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0])) + elif self.op.disk_template == constants.DT_BLOCK: + # Normalize and de-duplicate device paths + all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks]) + if len(all_disks) != len(self.disks): + raise errors.OpPrereqError("Duplicate disk names given for adoption", + errors.ECODE_INVAL) + baddisks = [d for d in all_disks + if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)] + if baddisks: + raise errors.OpPrereqError("Device node(s) %s lie outside %s and" + " cannot be adopted" % + (", ".join(baddisks), + constants.ADOPTABLE_BLOCKDEV_ROOT), + errors.ECODE_INVAL) + + node_disks = self.rpc.call_bdev_sizes([pnode.name], + list(all_disks))[pnode.name] + node_disks.Raise("Cannot get block device information from node %s" % + pnode.name) + node_disks = node_disks.payload + delta = all_disks.difference(node_disks.keys()) + if delta: + raise errors.OpPrereqError("Missing block device(s): %s" % + utils.CommaJoin(delta), + errors.ECODE_INVAL) + for dsk in self.disks: + dsk["size"] = int(float(node_disks[dsk["adopt"]])) + _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams) _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant) @@ -7613,7 +7830,7 @@ class LUInstanceCreate(LogicalUnit): else: network_port = None - if constants.ENABLE_FILE_STORAGE: + if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE: # this is needed because os.path.join does not accept None arguments if self.op.file_storage_dir is None: string_file_storage_dir = "" @@ -7621,7 +7838,12 @@ class LUInstanceCreate(LogicalUnit): string_file_storage_dir = self.op.file_storage_dir # build the full file storage dir path - file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(), + if self.op.disk_template == constants.DT_SHARED_FILE: + get_fsd_fn = self.cfg.GetSharedFileStorageDir + else: + get_fsd_fn = self.cfg.GetFileStorageDir + + file_storage_dir = utils.PathJoin(get_fsd_fn(), string_file_storage_dir, instance) else: file_storage_dir = "" @@ -7649,17 +7871,18 @@ class LUInstanceCreate(LogicalUnit): ) if 
-      # rename LVs to the newly-generated names; we need to construct
-      # 'fake' LV disks with the old data, plus the new unique_id
-      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
-      rename_to = []
-      for t_dsk, a_dsk in zip (tmp_disks, self.disks):
-        rename_to.append(t_dsk.logical_id)
-        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
-        self.cfg.SetDiskID(t_dsk, pnode_name)
-      result = self.rpc.call_blockdev_rename(pnode_name,
-                                             zip(tmp_disks, rename_to))
-      result.Raise("Failed to rename adoped LVs")
+      if self.op.disk_template == constants.DT_PLAIN:
+        # rename LVs to the newly-generated names; we need to construct
+        # 'fake' LV disks with the old data, plus the new unique_id
+        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
+        rename_to = []
+        for t_dsk, a_dsk in zip (tmp_disks, self.disks):
+          rename_to.append(t_dsk.logical_id)
+          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
+          self.cfg.SetDiskID(t_dsk, pnode_name)
+        result = self.rpc.call_blockdev_rename(pnode_name,
+                                               zip(tmp_disks, rename_to))
+        result.Raise("Failed to rename adoped LVs")
     else:
       feedback_fn("* creating instance disks...")
       try:
@@ -7839,9 +8062,9 @@ class LUInstanceConsole(NoHooksLU):
 
     if instance.name not in node_insts.payload:
       if instance.admin_up:
-        state = "ERROR_down"
+        state = constants.INSTST_ERRORDOWN
       else:
-        state = "ADMIN_down"
+        state = constants.INSTST_ADMINDOWN
       raise errors.OpExecError("Instance %s is not running (state %s)" %
                                (instance.name, state))
 
@@ -8767,9 +8990,10 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     self.disk = instance.FindDisk(self.op.disk)
 
-    if instance.disk_template != constants.DT_FILE:
-      # TODO: check the free disk space for file, when that feature
-      # will be supported
+    if instance.disk_template not in (constants.DT_FILE,
+                                      constants.DT_SHARED_FILE):
+      # TODO: check the free disk space for file, when that feature will be
+      # supported
       _CheckNodesFreeDiskPerVG(self, nodenames,
                                self.disk.ComputeGrowth(self.op.amount))
 
@@ -9510,7 +9734,8 @@ class LUInstanceSetParams(LogicalUnit):
         result.append(("disk/%d" % device_idx, "remove"))
       elif disk_op == constants.DDM_ADD:
         # add a new disk
-        if instance.disk_template == constants.DT_FILE:
+        if instance.disk_template in (constants.DT_FILE,
+                                      constants.DT_SHARED_FILE):
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
@@ -10216,7 +10441,6 @@ class LUGroupAssignNodes(NoHooksLU):
 
 
 class _GroupQuery(_QueryBase):
-
   FIELDS = query.GROUP_FIELDS
 
   def ExpandNames(self, lu):
@@ -10299,7 +10523,8 @@ class LUGroupQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
+    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                          self.op.output_fields, False)
 
   def ExpandNames(self):
     self.gq.ExpandNames(self)
@@ -10447,7 +10672,7 @@ class LUGroupRename(LogicalUnit):
 
   def ExpandNames(self):
     # This raises errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)
+    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: [self.group_uuid],
@@ -10456,8 +10681,7 @@ class LUGroupRename(LogicalUnit):
   def CheckPrereq(self):
     """Check prerequisites.
 
-    This checks that the given old_name exists as a node group, and that
-    new_name doesn't.
+    Ensures requested new name is not yet used.
""" try: @@ -10475,7 +10699,7 @@ class LUGroupRename(LogicalUnit): """ env = { - "OLD_NAME": self.op.old_name, + "OLD_NAME": self.op.group_name, "NEW_NAME": self.op.new_name, } @@ -10498,7 +10722,7 @@ class LUGroupRename(LogicalUnit): if group is None: raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" % - (self.op.old_name, self.group_uuid)) + (self.op.group_name, self.group_uuid)) group.name = self.op.new_name self.cfg.Update(group, feedback_fn) @@ -11055,8 +11279,7 @@ class IAllocator(object): "i_pri_up_memory": i_p_up_mem, } pnr_dyn.update(node_results[nname]) - - node_results[nname] = pnr_dyn + node_results[nname] = pnr_dyn return node_results @@ -11141,11 +11364,12 @@ class IAllocator(object): raise errors.ProgrammerError("Unknown instance '%s' passed to" " IAllocator" % self.name) - if instance.disk_template not in constants.DTS_NET_MIRROR: + if instance.disk_template not in constants.DTS_MIRRORED: raise errors.OpPrereqError("Can't relocate non-mirrored instances", errors.ECODE_INVAL) - if len(instance.secondary_nodes) != 1: + if instance.disk_template in constants.DTS_NET_MIRROR and \ + len(instance.secondary_nodes) != 1: raise errors.OpPrereqError("Instance has not exactly one secondary node", errors.ECODE_STATE)