X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/f6d9a52295f91fd8ce163603f90cb86f50272f82..d4f4b3e763cf53d4718521d22c3651cbb3f349a1:/lib/cmdlib.py

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 06fc49f..46944d9 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -136,7 +136,7 @@ class LogicalUnit(object):
     # Acquire all nodes and one instance
     self.needed_locks = {
       locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_INSTANCES: ['instance1.example.tld'],
+      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
     }
     # Acquire just two nodes
     self.needed_locks = {
@@ -298,7 +298,11 @@ class LogicalUnit(object):
       wanted_nodes.append(instance.primary_node)
       if not primary_only:
         wanted_nodes.extend(instance.secondary_nodes)
-    self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
+
+    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
+      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
+    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
+      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
 
     del self.recalculate_locks[locking.LEVEL_NODE]
 
@@ -494,6 +498,14 @@ class LUVerifyCluster(LogicalUnit):
   HPATH = "cluster-verify"
   HTYPE = constants.HTYPE_CLUSTER
   _OP_REQP = ["skip_checks"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
+    }
+    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
 
   def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                   remote_version, feedback_fn):
@@ -908,6 +920,14 @@ class LUVerifyDisks(NoHooksLU):
 
   """
   _OP_REQP = []
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
+    }
+    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
 
   def CheckPrereq(self):
     """Check prerequisites.
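
The second hunk above changes how _LockInstancesNodes recomputes the node
level: instead of always overwriting needed_locks, the LU now declares up
front (via recalculate_locks) whether the instances' nodes should replace the
current node list or be appended to it. A minimal sketch of that
recalculation, using plain dicts, string stand-ins for the constants and
made-up node names instead of the real LU and lock manager objects:

    # Illustrative only: the real code lives in LogicalUnit._LockInstancesNodes
    # and uses constants.LOCKS_REPLACE / constants.LOCKS_APPEND.
    LOCKS_REPLACE = "replace"
    LOCKS_APPEND = "append"

    def recalculate_node_locks(needed_locks, recalculate_locks, wanted_nodes):
      """Mimic the node-level recalculation step."""
      mode = recalculate_locks["node"]
      if mode == LOCKS_REPLACE:
        # The LU only cares about the instances' own nodes: drop the rest.
        needed_locks["node"] = wanted_nodes
      elif mode == LOCKS_APPEND:
        # The LU already asked for some nodes (e.g. a new secondary) and the
        # instances' nodes are added on top of them.
        needed_locks["node"].extend(wanted_nodes)
      del recalculate_locks["node"]

    needed = {"node": ["node3.example.com"]}
    recalc = {"node": LOCKS_APPEND}
    recalculate_node_locks(needed, recalc,
                           ["node1.example.com", "node2.example.com"])
    # needed["node"] now holds all three nodes

The append mode matters for operations such as replace-disks further down,
where a requested secondary node is locked explicitly and the instance's
current nodes have to be added to that list rather than replace it.
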
""" - nodenames = self.wanted - nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames] + all_info = self.cfg.GetAllNodesInfo() + if self.do_locking: + nodenames = self.acquired_locks[locking.LEVEL_NODE] + else: + nodenames = all_info.keys() + nodelist = [all_info[name] for name in nodenames] # begin data gathering @@ -1832,6 +1864,16 @@ class LUActivateInstanceDisks(NoHooksLU): """ _OP_REQP = ["instance_name"] + REQ_BGL = False + + def ExpandNames(self): + self._ExpandAndLockInstance() + self.needed_locks[locking.LEVEL_NODE] = [] + self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE + + def DeclareLocks(self, level): + if level == locking.LEVEL_NODE: + self._LockInstancesNodes() def CheckPrereq(self): """Check prerequisites. @@ -1839,13 +1881,9 @@ class LUActivateInstanceDisks(NoHooksLU): This checks that the instance is in the cluster. """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name) - self.instance = instance - + self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) + assert self.instance is not None, \ + "Cannot retrieve locked instance %s" % self.op.instance_name def Exec(self, feedback_fn): """Activate the disks. @@ -1939,6 +1977,16 @@ class LUDeactivateInstanceDisks(NoHooksLU): """ _OP_REQP = ["instance_name"] + REQ_BGL = False + + def ExpandNames(self): + self._ExpandAndLockInstance() + self.needed_locks[locking.LEVEL_NODE] = [] + self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE + + def DeclareLocks(self, level): + if level == locking.LEVEL_NODE: + self._LockInstancesNodes() def CheckPrereq(self): """Check prerequisites. @@ -1946,29 +1994,36 @@ class LUDeactivateInstanceDisks(NoHooksLU): This checks that the instance is in the cluster. """ - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name) - self.instance = instance + self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) + assert self.instance is not None, \ + "Cannot retrieve locked instance %s" % self.op.instance_name def Exec(self, feedback_fn): """Deactivate the disks """ instance = self.instance - ins_l = rpc.call_instance_list([instance.primary_node]) - ins_l = ins_l[instance.primary_node] - if not type(ins_l) is list: - raise errors.OpExecError("Can't contact node '%s'" % - instance.primary_node) + _SafeShutdownInstanceDisks(instance, self.cfg) - if self.instance.name in ins_l: - raise errors.OpExecError("Instance is running, can't shutdown" - " block devices.") - _ShutdownInstanceDisks(instance, self.cfg) +def _SafeShutdownInstanceDisks(instance, cfg): + """Shutdown block devices of an instance. + + This function checks if an instance is running, before calling + _ShutdownInstanceDisks. 
+ + """ + ins_l = rpc.call_instance_list([instance.primary_node]) + ins_l = ins_l[instance.primary_node] + if not type(ins_l) is list: + raise errors.OpExecError("Can't contact node '%s'" % + instance.primary_node) + + if instance.name in ins_l: + raise errors.OpExecError("Instance is running, can't shutdown" + " block devices.") + + _ShutdownInstanceDisks(instance, cfg) def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False): @@ -2486,15 +2541,18 @@ class LUQueryInstances(NoHooksLU): def ExpandNames(self): self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"]) - _CheckOutputFields(static=["name", "os", "pnode", "snodes", - "admin_state", "admin_ram", - "disk_template", "ip", "mac", "bridge", - "sda_size", "sdb_size", "vcpus", "tags", - "auto_balance", - "network_port", "kernel_path", "initrd_path", - "hvm_boot_order", "hvm_acpi", "hvm_pae", - "hvm_cdrom_image_path", "hvm_nic_type", - "hvm_disk_type", "vnc_bind_address"], + self.static_fields = frozenset([ + "name", "os", "pnode", "snodes", + "admin_state", "admin_ram", + "disk_template", "ip", "mac", "bridge", + "sda_size", "sdb_size", "vcpus", "tags", + "auto_balance", + "network_port", "kernel_path", "initrd_path", + "hvm_boot_order", "hvm_acpi", "hvm_pae", + "hvm_cdrom_image_path", "hvm_nic_type", + "hvm_disk_type", "vnc_bind_address", + ]) + _CheckOutputFields(static=self.static_fields, dynamic=self.dynamic_fields, selected=self.op.output_fields) @@ -2502,37 +2560,37 @@ class LUQueryInstances(NoHooksLU): self.share_locks[locking.LEVEL_INSTANCE] = 1 self.share_locks[locking.LEVEL_NODE] = 1 - # TODO: we could lock instances (and nodes) only if the user asked for - # dynamic fields. For that we need atomic ways to get info for a group of - # instances from the config, though. - if not self.op.names: - self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET + if self.op.names: + self.wanted = _GetWantedInstances(self, self.op.names) else: - self.needed_locks[locking.LEVEL_INSTANCE] = \ - _GetWantedInstances(self, self.op.names) + self.wanted = locking.ALL_SET - self.needed_locks[locking.LEVEL_NODE] = [] - self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE + self.do_locking = not self.static_fields.issuperset(self.op.output_fields) + if self.do_locking: + self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted + self.needed_locks[locking.LEVEL_NODE] = [] + self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE def DeclareLocks(self, level): - # TODO: locking of nodes could be avoided when not querying them - if level == locking.LEVEL_NODE: + if level == locking.LEVEL_NODE and self.do_locking: self._LockInstancesNodes() def CheckPrereq(self): """Check prerequisites. """ - # This of course is valid only if we locked the instances - self.wanted = self.acquired_locks[locking.LEVEL_INSTANCE] + pass def Exec(self, feedback_fn): """Computes the list of nodes and their attributes. 
""" - instance_names = self.wanted - instance_list = [self.cfg.GetInstanceInfo(iname) for iname - in instance_names] + all_info = self.cfg.GetAllInstancesInfo() + if self.do_locking: + instance_names = self.acquired_locks[locking.LEVEL_INSTANCE] + else: + instance_names = all_info.keys() + instance_list = [all_info[iname] for iname in instance_names] # begin data gathering @@ -3496,6 +3554,38 @@ class LUReplaceDisks(LogicalUnit): HPATH = "mirrors-replace" HTYPE = constants.HTYPE_INSTANCE _OP_REQP = ["instance_name", "mode", "disks"] + REQ_BGL = False + + def ExpandNames(self): + self._ExpandAndLockInstance() + + if not hasattr(self.op, "remote_node"): + self.op.remote_node = None + + ia_name = getattr(self.op, "iallocator", None) + if ia_name is not None: + if self.op.remote_node is not None: + raise errors.OpPrereqError("Give either the iallocator or the new" + " secondary, not both") + self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET + elif self.op.remote_node is not None: + remote_node = self.cfg.ExpandNodeName(self.op.remote_node) + if remote_node is None: + raise errors.OpPrereqError("Node '%s' not known" % + self.op.remote_node) + self.op.remote_node = remote_node + self.needed_locks[locking.LEVEL_NODE] = [remote_node] + self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND + else: + self.needed_locks[locking.LEVEL_NODE] = [] + self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE + + def DeclareLocks(self, level): + # If we're not already locking all nodes in the set we have to declare the + # instance's primary/secondary nodes. + if (level == locking.LEVEL_NODE and + self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET): + self._LockInstancesNodes() def _RunAllocator(self): """Compute a new secondary node using an IAllocator. @@ -3546,16 +3636,10 @@ class LUReplaceDisks(LogicalUnit): This checks that the instance is in the cluster. 
""" - if not hasattr(self.op, "remote_node"): - self.op.remote_node = None - - instance = self.cfg.GetInstanceInfo( - self.cfg.ExpandInstanceName(self.op.instance_name)) - if instance is None: - raise errors.OpPrereqError("Instance '%s' not known" % - self.op.instance_name) + instance = self.cfg.GetInstanceInfo(self.op.instance_name) + assert instance is not None, \ + "Cannot retrieve locked instance %s" % self.op.instance_name self.instance = instance - self.op.instance_name = instance.name if instance.disk_template not in constants.DTS_NET_MIRROR: raise errors.OpPrereqError("Instance's disk layout is not" @@ -3570,18 +3654,13 @@ class LUReplaceDisks(LogicalUnit): ia_name = getattr(self.op, "iallocator", None) if ia_name is not None: - if self.op.remote_node is not None: - raise errors.OpPrereqError("Give either the iallocator or the new" - " secondary, not both") self._RunAllocator() remote_node = self.op.remote_node if remote_node is not None: - remote_node = self.cfg.ExpandNodeName(remote_node) - if remote_node is None: - raise errors.OpPrereqError("Node '%s' not known" % - self.op.remote_node) self.remote_node_info = self.cfg.GetNodeInfo(remote_node) + assert self.remote_node_info is not None, \ + "Cannot retrieve locked node %s" % remote_node else: self.remote_node_info = None if remote_node == instance.primary_node: @@ -3622,7 +3701,6 @@ class LUReplaceDisks(LogicalUnit): if instance.FindDisk(name) is None: raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" % (name, instance.name)) - self.op.remote_node = remote_node def _ExecD8DiskOnly(self, feedback_fn): """Replace a disk on the primary or secondary for dbrd8. @@ -3968,8 +4046,7 @@ class LUReplaceDisks(LogicalUnit): # Activate the instance disks if we're replacing them on a down instance if instance.status == "down": - op = opcodes.OpActivateInstanceDisks(instance_name=instance.name) - self.proc.ChainOpCode(op) + _StartInstanceDisks(self.cfg, instance, True) if instance.disk_template == constants.DT_DRBD8: if self.op.remote_node is None: @@ -3983,8 +4060,7 @@ class LUReplaceDisks(LogicalUnit): # Deactivate the instance disks if we're replacing them on a down instance if instance.status == "down": - op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name) - self.proc.ChainOpCode(op) + _SafeShutdownInstanceDisks(instance, self.cfg) return ret @@ -4084,6 +4160,33 @@ class LUQueryInstanceData(NoHooksLU): """ _OP_REQP = ["instances"] + REQ_BGL = False + def ExpandNames(self): + self.needed_locks = {} + self.share_locks = dict(((i, 1) for i in locking.LEVELS)) + + if not isinstance(self.op.instances, list): + raise errors.OpPrereqError("Invalid argument type 'instances'") + + if self.op.instances: + self.wanted_names = [] + for name in self.op.instances: + full_name = self.cfg.ExpandInstanceName(name) + if full_name is None: + raise errors.OpPrereqError("Instance '%s' not known" % + self.op.instance_name) + self.wanted_names.append(full_name) + self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names + else: + self.wanted_names = None + self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET + + self.needed_locks[locking.LEVEL_NODE] = [] + self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE + + def DeclareLocks(self, level): + if level == locking.LEVEL_NODE: + self._LockInstancesNodes() def CheckPrereq(self): """Check prerequisites. @@ -4091,21 +4194,12 @@ class LUQueryInstanceData(NoHooksLU): This only checks the optional instance list against the existing names. 
""" - if not isinstance(self.op.instances, list): - raise errors.OpPrereqError("Invalid argument type 'instances'") - if self.op.instances: - self.wanted_instances = [] - names = self.op.instances - for name in names: - instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name)) - if instance is None: - raise errors.OpPrereqError("No such instance name '%s'" % name) - self.wanted_instances.append(instance) - else: - self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name - in self.cfg.GetInstanceList()] - return + if self.wanted_names is None: + self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE] + self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name + in self.wanted_names] + return def _ComputeDiskStatus(self, instance, snode, dev): """Compute block device status.