Add GanetiLockManager.is_owned function
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 830cd07..46944d9 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -126,9 +126,7 @@ class LogicalUnit(object):
       - Use an empty dict if you don't need any lock
       - If you don't need any lock at a particular level omit that level
       - Don't put anything for the BGL level
-      - If you want all locks at a level use None as a value
-        (this reflects what LockSet does, and will be replaced before
-        CheckPrereq with the full list of nodes that have been locked)
+      - If you want all locks at a level use locking.ALL_SET as a value
 
     If you need to share locks (rather than acquire them exclusively) at one
     level you can modify self.share_locks, setting a true value (usually 1) for
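
As an illustration only, here is a minimal ExpandNames for a hypothetical
query-style LU combining locking.ALL_SET with shared locks (the
LUVerifyCluster hunk below uses exactly this shape):

    def ExpandNames(self):
      # Acquire every node and instance lock in shared mode, so several
      # read-only LUs can run concurrently.
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
      }
      self.share_locks = dict(((i, 1) for i in locking.LEVELS))
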
@@ -137,8 +135,8 @@ class LogicalUnit(object):
     Examples:
     # Acquire all nodes and one instance
     self.needed_locks = {
-      locking.LEVEL_NODE: None,
-      locking.LEVEL_INSTANCES: ['instance1.example.tld'],
+      locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
     }
     # Acquire just two nodes
     self.needed_locks = {
@@ -263,7 +261,7 @@ class LogicalUnit(object):
     self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
     self.op.instance_name = expanded_name
 
-  def _LockInstancesNodes(self):
+  def _LockInstancesNodes(self, primary_only=False):
     """Helper function to declare instances' nodes for locking.
 
     This function should be called after locking one or more instances to lock
     """Helper function to declare instances' nodes for locking.
 
     This function should be called after locking one or more instances to lock
@@ -282,6 +280,9 @@ class LogicalUnit(object):
     if level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
 
+    @type primary_only: boolean
+    @param primary_only: only lock primary nodes of locked instances
+
     """
     assert locking.LEVEL_NODE in self.recalculate_locks, \
       "_LockInstancesNodes helper function called with no nodes to recalculate"
     """
     assert locking.LEVEL_NODE in self.recalculate_locks, \
       "_LockInstancesNodes helper function called with no nodes to recalculate"
@@ -295,8 +296,13 @@ class LogicalUnit(object):
     for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
       instance = self.context.cfg.GetInstanceInfo(instance_name)
       wanted_nodes.append(instance.primary_node)
-      wanted_nodes.extend(instance.secondary_nodes)
-    self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
+      if not primary_only:
+        wanted_nodes.extend(instance.secondary_nodes)
+
+    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
+      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
+    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
+      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
 
     del self.recalculate_locks[locking.LEVEL_NODE]
 
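
The two recalculation modes differ only in how the computed node list is
merged back; a condensed sketch of the two declarations as they would appear
in ExpandNames (remote_node is a hypothetical, already-expanded node name):

    # LOCKS_REPLACE: start empty; _LockInstancesNodes fills in the
    # instances' nodes from scratch.
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    # LOCKS_APPEND: keep nodes declared up front (e.g. a new secondary)
    # and extend them with the instances' nodes.
    self.needed_locks[locking.LEVEL_NODE] = [remote_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
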
@@ -322,17 +328,17 @@ def _GetWantedNodes(lu, nodes):
   if not isinstance(nodes, list):
     raise errors.OpPrereqError("Invalid argument type 'nodes'")
 
-  if nodes:
-    wanted = []
+  if not nodes:
+    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
+      " non-empty list of nodes whose name is to be expanded.")
 
-    for name in nodes:
-      node = lu.cfg.ExpandNodeName(name)
-      if node is None:
-        raise errors.OpPrereqError("No such node name '%s'" % name)
-      wanted.append(node)
+  wanted = []
+  for name in nodes:
+    node = lu.cfg.ExpandNodeName(name)
+    if node is None:
+      raise errors.OpPrereqError("No such node name '%s'" % name)
+    wanted.append(node)
 
-  else:
-    wanted = lu.cfg.GetNodeList()
   return utils.NiceSort(wanted)
 
 
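
Since _GetWantedNodes no longer falls back to the full node list, callers make
the "all nodes" case explicit; a sketch of the new caller-side idiom (compare
the LUQueryNodeVolumes hunk below):

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
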
@@ -492,6 +498,14 @@ class LUVerifyCluster(LogicalUnit):
   HPATH = "cluster-verify"
   HTYPE = constants.HTYPE_CLUSTER
   _OP_REQP = ["skip_checks"]
   HPATH = "cluster-verify"
   HTYPE = constants.HTYPE_CLUSTER
   _OP_REQP = ["skip_checks"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
+    }
+    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
 
   def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                   remote_version, feedback_fn):
@@ -906,6 +920,14 @@ class LUVerifyDisks(NoHooksLU):
 
   """
   _OP_REQP = []
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
+    }
+    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1218,13 +1240,9 @@ class LUDiagnoseOS(NoHooksLU):
 
   """
   _OP_REQP = ["output_fields", "names"]
+  REQ_BGL = False
 
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This always succeeds, since this is a pure query LU.
-
-    """
+  def ExpandNames(self):
     if self.op.names:
       raise errors.OpPrereqError("Selective OS query not supported")
 
@@ -1233,6 +1251,16 @@ class LUDiagnoseOS(NoHooksLU):
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
+    # Lock all nodes, in shared mode
+    self.needed_locks = {}
+    self.share_locks[locking.LEVEL_NODE] = 1
+    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    """
+
   @staticmethod
   def _DiagnoseByOS(node_list, rlist):
     """Remaps a per-node return list into an a per-os per-node dictionary
@@ -1268,7 +1296,7 @@ class LUDiagnoseOS(NoHooksLU):
     """Compute the list of OSes.
 
     """
     """Compute the list of OSes.
 
     """
-    node_list = self.cfg.GetNodeList()
+    node_list = self.acquired_locks[locking.LEVEL_NODE]
     node_data = rpc.call_os_diagnose(node_list)
     if node_data == False:
       raise errors.OpExecError("Can't gather the list of OSes")
@@ -1377,36 +1405,48 @@ class LUQueryNodes(NoHooksLU):
       "ctotal",
       ])
 
       "ctotal",
       ])
 
-    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
-                               "pinst_list", "sinst_list",
-                               "pip", "sip", "tags"],
+    self.static_fields = frozenset([
+      "name", "pinst_cnt", "sinst_cnt",
+      "pinst_list", "sinst_list",
+      "pip", "sip", "tags",
+      ])
+
+    _CheckOutputFields(static=self.static_fields,
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
     self.needed_locks = {}
     self.share_locks[locking.LEVEL_NODE] = 1
-    # TODO: we could lock nodes only if the user asked for dynamic fields. For
-    # that we need atomic ways to get info for a group of nodes from the
-    # config, though.
-    if not self.op.names:
-      self.needed_locks[locking.LEVEL_NODE] = None
+
+    if self.op.names:
+      self.wanted = _GetWantedNodes(self, self.op.names)
     else:
-      self.needed_locks[locking.LEVEL_NODE] = \
-        _GetWantedNodes(self, self.op.names)
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
+    if self.do_locking:
+      # if we don't request only static fields, we need to lock the nodes
+      self.needed_locks[locking.LEVEL_NODE] = self.wanted
+
 
   def CheckPrereq(self):
     """Check prerequisites.
 
     """
-    # This of course is valid only if we locked the nodes
-    self.wanted = self.acquired_locks[locking.LEVEL_NODE]
+    # The validation of the node list is done in the _GetWantedNodes,
+    # if non empty, and if empty, there's no validation to do
+    pass
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = self.wanted
-    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
+    all_info = self.cfg.GetAllNodesInfo()
+    if self.do_locking:
+      nodenames = self.acquired_locks[locking.LEVEL_NODE]
+    else:
+      nodenames = all_info.keys()
+    nodelist = [all_info[name] for name in nodenames]
 
     # begin data gathering
 
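
The point of the static_fields/do_locking split is that purely static queries
can be answered from the config without node locks; a condensed sketch of the
decision, assuming the frozensets defined above:

    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      # Dynamic fields were requested: lock the wanted nodes (or ALL_SET).
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
    # else: Exec answers from cfg.GetAllNodesInfo() without taking locks.
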
@@ -1483,6 +1523,20 @@ class LUQueryNodeVolumes(NoHooksLU):
 
   """
   _OP_REQP = ["nodes", "output_fields"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    _CheckOutputFields(static=["node"],
+                       dynamic=["phys", "vg", "name", "size", "instance"],
+                       selected=self.op.output_fields)
+
+    self.needed_locks = {}
+    self.share_locks[locking.LEVEL_NODE] = 1
+    if not self.op.nodes:
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    else:
+      self.needed_locks[locking.LEVEL_NODE] = \
+        _GetWantedNodes(self, self.op.nodes)
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1490,12 +1544,7 @@ class LUQueryNodeVolumes(NoHooksLU):
     This checks that the fields required are valid output fields.
 
     """
-    self.nodes = _GetWantedNodes(self, self.op.nodes)
-
-    _CheckOutputFields(static=["node"],
-                       dynamic=["phys", "vg", "name", "size", "instance"],
-                       selected=self.op.output_fields)
-
+    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
@@ -1815,6 +1864,16 @@ class LUActivateInstanceDisks(NoHooksLU):
 
   """
   _OP_REQP = ["instance_name"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+    self.needed_locks[locking.LEVEL_NODE] = []
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE:
+      self._LockInstancesNodes()
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1822,13 +1881,9 @@ class LUActivateInstanceDisks(NoHooksLU):
     This checks that the instance is in the cluster.
 
     """
-    instance = self.cfg.GetInstanceInfo(
-      self.cfg.ExpandInstanceName(self.op.instance_name))
-    if instance is None:
-      raise errors.OpPrereqError("Instance '%s' not known" %
-                                 self.op.instance_name)
-    self.instance = instance
-
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
 
   def Exec(self, feedback_fn):
     """Activate the disks.
@@ -1922,6 +1977,16 @@ class LUDeactivateInstanceDisks(NoHooksLU):
 
   """
   _OP_REQP = ["instance_name"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+    self.needed_locks[locking.LEVEL_NODE] = []
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE:
+      self._LockInstancesNodes()
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1929,29 +1994,36 @@ class LUDeactivateInstanceDisks(NoHooksLU):
     This checks that the instance is in the cluster.
 
     """
-    instance = self.cfg.GetInstanceInfo(
-      self.cfg.ExpandInstanceName(self.op.instance_name))
-    if instance is None:
-      raise errors.OpPrereqError("Instance '%s' not known" %
-                                 self.op.instance_name)
-    self.instance = instance
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
 
   def Exec(self, feedback_fn):
     """Deactivate the disks
 
     """
     instance = self.instance
-    ins_l = rpc.call_instance_list([instance.primary_node])
-    ins_l = ins_l[instance.primary_node]
-    if not type(ins_l) is list:
-      raise errors.OpExecError("Can't contact node '%s'" %
-                               instance.primary_node)
+    _SafeShutdownInstanceDisks(instance, self.cfg)
 
-    if self.instance.name in ins_l:
-      raise errors.OpExecError("Instance is running, can't shutdown"
-                               " block devices.")
 
-    _ShutdownInstanceDisks(instance, self.cfg)
+def _SafeShutdownInstanceDisks(instance, cfg):
+  """Shutdown block devices of an instance.
+
+  This function checks if an instance is running, before calling
+  _ShutdownInstanceDisks.
+
+  """
+  ins_l = rpc.call_instance_list([instance.primary_node])
+  ins_l = ins_l[instance.primary_node]
+  if not type(ins_l) is list:
+    raise errors.OpExecError("Can't contact node '%s'" %
+                             instance.primary_node)
+
+  if instance.name in ins_l:
+    raise errors.OpExecError("Instance is running, can't shutdown"
+                             " block devices.")
+
+  _ShutdownInstanceDisks(instance, cfg)
 
 
 def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
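
A call-site sketch for the new helper; it raises OpExecError when the
instance is still running, so callers need no separate check of their own:

    # e.g. from LUDeactivateInstanceDisks.Exec or LUReplaceDisks.Exec
    _SafeShutdownInstanceDisks(instance, self.cfg)
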
@@ -2017,7 +2089,7 @@ class LUStartupInstance(LogicalUnit):
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
@@ -2092,12 +2164,12 @@ class LURebootInstance(LogicalUnit):
                                    constants.INSTANCE_REBOOT_FULL))
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
-      # FIXME: lock only primary on (not constants.INSTANCE_REBOOT_FULL)
-      self._LockInstancesNodes()
+      primary_only = (self.op.reboot_type != constants.INSTANCE_REBOOT_FULL)
+      self._LockInstancesNodes(primary_only=primary_only)
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -2166,7 +2238,7 @@ class LUShutdownInstance(LogicalUnit):
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
@@ -2218,7 +2290,7 @@ class LUReinstallInstance(LogicalUnit):
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
@@ -2469,15 +2541,18 @@ class LUQueryInstances(NoHooksLU):
 
   def ExpandNames(self):
     self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
-    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
-                               "admin_state", "admin_ram",
-                               "disk_template", "ip", "mac", "bridge",
-                               "sda_size", "sdb_size", "vcpus", "tags",
-                               "auto_balance",
-                               "network_port", "kernel_path", "initrd_path",
-                               "hvm_boot_order", "hvm_acpi", "hvm_pae",
-                               "hvm_cdrom_image_path", "hvm_nic_type",
-                               "hvm_disk_type", "vnc_bind_address"],
+    self.static_fields = frozenset([
+      "name", "os", "pnode", "snodes",
+      "admin_state", "admin_ram",
+      "disk_template", "ip", "mac", "bridge",
+      "sda_size", "sdb_size", "vcpus", "tags",
+      "auto_balance",
+      "network_port", "kernel_path", "initrd_path",
+      "hvm_boot_order", "hvm_acpi", "hvm_pae",
+      "hvm_cdrom_image_path", "hvm_nic_type",
+      "hvm_disk_type", "vnc_bind_address",
+      ])
+    _CheckOutputFields(static=self.static_fields,
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
@@ -2485,37 +2560,37 @@ class LUQueryInstances(NoHooksLU):
     self.share_locks[locking.LEVEL_INSTANCE] = 1
     self.share_locks[locking.LEVEL_NODE] = 1
 
-    # TODO: we could lock instances (and nodes) only if the user asked for
-    # dynamic fields. For that we need atomic ways to get info for a group of
-    # instances from the config, though.
-    if not self.op.names:
-      self.needed_locks[locking.LEVEL_INSTANCE] = None # Acquire all
+    if self.op.names:
+      self.wanted = _GetWantedInstances(self, self.op.names)
     else:
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        _GetWantedInstances(self, self.op.names)
+      self.wanted = locking.ALL_SET
 
-    self.needed_locks[locking.LEVEL_NODE] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
+    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
+    if self.do_locking:
+      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
+      self.needed_locks[locking.LEVEL_NODE] = []
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
-    # TODO: locking of nodes could be avoided when not querying them
-    if level == locking.LEVEL_NODE:
+    if level == locking.LEVEL_NODE and self.do_locking:
       self._LockInstancesNodes()
 
   def CheckPrereq(self):
     """Check prerequisites.
 
     """
-    # This of course is valid only if we locked the instances
-    self.wanted = self.acquired_locks[locking.LEVEL_INSTANCE]
+    pass
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
 
     """
-    instance_names = self.wanted
-    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
-                     in instance_names]
+    all_info = self.cfg.GetAllInstancesInfo()
+    if self.do_locking:
+      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+    else:
+      instance_names = all_info.keys()
+    instance_list = [all_info[iname] for iname in instance_names]
 
     # begin data gathering
 
@@ -2630,7 +2705,7 @@ class LUFailoverInstance(LogicalUnit):
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
@@ -3479,6 +3554,38 @@ class LUReplaceDisks(LogicalUnit):
   HPATH = "mirrors-replace"
   HTYPE = constants.HTYPE_INSTANCE
   _OP_REQP = ["instance_name", "mode", "disks"]
   HPATH = "mirrors-replace"
   HTYPE = constants.HTYPE_INSTANCE
   _OP_REQP = ["instance_name", "mode", "disks"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+
+    if not hasattr(self.op, "remote_node"):
+      self.op.remote_node = None
+
+    ia_name = getattr(self.op, "iallocator", None)
+    if ia_name is not None:
+      if self.op.remote_node is not None:
+        raise errors.OpPrereqError("Give either the iallocator or the new"
+                                   " secondary, not both")
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    elif self.op.remote_node is not None:
+      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
+      if remote_node is None:
+        raise errors.OpPrereqError("Node '%s' not known" %
+                                   self.op.remote_node)
+      self.op.remote_node = remote_node
+      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+    else:
+      self.needed_locks[locking.LEVEL_NODE] = []
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+  def DeclareLocks(self, level):
+    # If we're not already locking all nodes in the set we have to declare the
+    # instance's primary/secondary nodes.
+    if (level == locking.LEVEL_NODE and
+        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
+      self._LockInstancesNodes()
 
   def _RunAllocator(self):
     """Compute a new secondary node using an IAllocator.
@@ -3529,16 +3636,10 @@ class LUReplaceDisks(LogicalUnit):
     This checks that the instance is in the cluster.
 
     """
-    if not hasattr(self.op, "remote_node"):
-      self.op.remote_node = None
-
-    instance = self.cfg.GetInstanceInfo(
-      self.cfg.ExpandInstanceName(self.op.instance_name))
-    if instance is None:
-      raise errors.OpPrereqError("Instance '%s' not known" %
-                                 self.op.instance_name)
+    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
     self.instance = instance
-    self.op.instance_name = instance.name
 
     if instance.disk_template not in constants.DTS_NET_MIRROR:
       raise errors.OpPrereqError("Instance's disk layout is not"
@@ -3553,18 +3654,13 @@ class LUReplaceDisks(LogicalUnit):
 
     ia_name = getattr(self.op, "iallocator", None)
     if ia_name is not None:
-      if self.op.remote_node is not None:
-        raise errors.OpPrereqError("Give either the iallocator or the new"
-                                   " secondary, not both")
-      self.op.remote_node = self._RunAllocator()
+      self._RunAllocator()
 
     remote_node = self.op.remote_node
     if remote_node is not None:
-      remote_node = self.cfg.ExpandNodeName(remote_node)
-      if remote_node is None:
-        raise errors.OpPrereqError("Node '%s' not known" %
-                                   self.op.remote_node)
       self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
+      assert self.remote_node_info is not None, \
+        "Cannot retrieve locked node %s" % remote_node
     else:
       self.remote_node_info = None
     if remote_node == instance.primary_node:
@@ -3605,7 +3701,6 @@ class LUReplaceDisks(LogicalUnit):
       if instance.FindDisk(name) is None:
         raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                    (name, instance.name))
-    self.op.remote_node = remote_node
 
   def _ExecD8DiskOnly(self, feedback_fn):
     """Replace a disk on the primary or secondary for dbrd8.
@@ -3951,8 +4046,7 @@ class LUReplaceDisks(LogicalUnit):
 
     # Activate the instance disks if we're replacing them on a down instance
     if instance.status == "down":
-      op = opcodes.OpActivateInstanceDisks(instance_name=instance.name)
-      self.proc.ChainOpCode(op)
+      _StartInstanceDisks(self.cfg, instance, True)
 
     if instance.disk_template == constants.DT_DRBD8:
       if self.op.remote_node is None:
@@ -3966,8 +4060,7 @@ class LUReplaceDisks(LogicalUnit):
 
     # Deactivate the instance disks if we're replacing them on a down instance
     if instance.status == "down":
-      op = opcodes.OpDeactivateInstanceDisks(instance_name=instance.name)
-      self.proc.ChainOpCode(op)
+      _SafeShutdownInstanceDisks(instance, self.cfg)
 
     return ret
 
@@ -3979,6 +4072,16 @@ class LUGrowDisk(LogicalUnit):
   HPATH = "disk-grow"
   HTYPE = constants.HTYPE_INSTANCE
   _OP_REQP = ["instance_name", "disk", "amount"]
   HPATH = "disk-grow"
   HTYPE = constants.HTYPE_INSTANCE
   _OP_REQP = ["instance_name", "disk", "amount"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+    self.needed_locks[locking.LEVEL_NODE] = []
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE:
+      self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -4003,13 +4106,11 @@ class LUGrowDisk(LogicalUnit):
     This checks that the instance is in the cluster.
 
     """
-    instance = self.cfg.GetInstanceInfo(
-      self.cfg.ExpandInstanceName(self.op.instance_name))
-    if instance is None:
-      raise errors.OpPrereqError("Instance '%s' not known" %
-                                 self.op.instance_name)
+    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+
     self.instance = instance
-    self.op.instance_name = instance.name
 
     if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
       raise errors.OpPrereqError("Instance's disk layout does not support"
@@ -4044,7 +4145,7 @@ class LUGrowDisk(LogicalUnit):
     for node in (instance.secondary_nodes + (instance.primary_node,)):
       self.cfg.SetDiskID(disk, node)
       result = rpc.call_blockdev_grow(node, disk, self.op.amount)
-      if not result or not isinstance(result, tuple) or len(result) != 2:
+      if not result or not isinstance(result, (list, tuple)) or len(result) != 2:
         raise errors.OpExecError("grow request failed to node %s" % node)
       elif not result[0]:
         raise errors.OpExecError("grow request failed to node %s: %s" %
@@ -4059,6 +4160,33 @@ class LUQueryInstanceData(NoHooksLU):
 
   """
   _OP_REQP = ["instances"]
+  REQ_BGL = False
+  def ExpandNames(self):
+    self.needed_locks = {}
+    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
+
+    if not isinstance(self.op.instances, list):
+      raise errors.OpPrereqError("Invalid argument type 'instances'")
+
+    if self.op.instances:
+      self.wanted_names = []
+      for name in self.op.instances:
+        full_name = self.cfg.ExpandInstanceName(name)
+        if full_name is None:
+          raise errors.OpPrereqError("Instance '%s' not known" %
+                                     name)
+        self.wanted_names.append(full_name)
+      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+    else:
+      self.wanted_names = None
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+
+    self.needed_locks[locking.LEVEL_NODE] = []
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE:
+      self._LockInstancesNodes()
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -4066,21 +4194,12 @@ class LUQueryInstanceData(NoHooksLU):
     This only checks the optional instance list against the existing names.
 
     """
-    if not isinstance(self.op.instances, list):
-      raise errors.OpPrereqError("Invalid argument type 'instances'")
-    if self.op.instances:
-      self.wanted_instances = []
-      names = self.op.instances
-      for name in names:
-        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
-        if instance is None:
-          raise errors.OpPrereqError("No such instance name '%s'" % name)
-        self.wanted_instances.append(instance)
-    else:
-      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
-                               in self.cfg.GetInstanceList()]
-    return
+    if self.wanted_names is None:
+      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
 
+    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
+                             in self.wanted_names]
+    return
 
   def _ComputeDiskStatus(self, instance, snode, dev):
     """Compute block device status.
@@ -4457,13 +4576,23 @@ class LUQueryExports(NoHooksLU):
   """Query the exports list
 
   """
   """Query the exports list
 
   """
-  _OP_REQP = []
+  _OP_REQP = ['nodes']
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
+    self.share_locks[locking.LEVEL_NODE] = 1
+    if not self.op.nodes:
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    else:
+      self.needed_locks[locking.LEVEL_NODE] = \
+        _GetWantedNodes(self, self.op.nodes)
 
   def CheckPrereq(self):
-    """Check that the nodelist contains only existing nodes.
+    """Check prerequisites.
 
     """
-    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
+    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
 
   def Exec(self, feedback_fn):
     """Compute the list of all the exported system images.
@@ -4484,6 +4613,23 @@ class LUExportInstance(LogicalUnit):
   HPATH = "instance-export"
   HTYPE = constants.HTYPE_INSTANCE
   _OP_REQP = ["instance_name", "target_node", "shutdown"]
   HPATH = "instance-export"
   HTYPE = constants.HTYPE_INSTANCE
   _OP_REQP = ["instance_name", "target_node", "shutdown"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+    # FIXME: lock only instance primary and destination node
+    #
+  # Sad but true, for now we have to lock all nodes, as we don't know where
+  # the previous export might be, and in this LU we search for it and
+  # remove it from its current node. In the future we could fix this by:
+  #  - making a tasklet to search (share-lock all), then create the new one,
+  #    then one to remove it afterwards
+  #  - removing the removal operation altogether
+    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+  def DeclareLocks(self, level):
+    """Last minute lock declaration."""
+    # All nodes are locked anyway, so nothing to do here.
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -4506,20 +4652,16 @@ class LUExportInstance(LogicalUnit):
     This checks that the instance and node names are valid.
 
     """
-    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
+    instance_name = self.op.instance_name
     self.instance = self.cfg.GetInstanceInfo(instance_name)
-    if self.instance is None:
-      raise errors.OpPrereqError("Instance '%s' not found" %
-                                 self.op.instance_name)
+    assert self.instance is not None, \
+          "Cannot retrieve locked instance %s" % self.op.instance_name
 
-    # node verification
-    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
-    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
+    self.dst_node = self.cfg.GetNodeInfo(
+      self.cfg.ExpandNodeName(self.op.target_node))
 
-    if self.dst_node is None:
-      raise errors.OpPrereqError("Destination node '%s' is unknown." %
-                                 self.op.target_node)
-    self.op.target_node = self.dst_node.name
+    assert self.dst_node is not None, \
+          "Cannot retrieve locked node %s" % self.op.target_node
 
     # instance disk type verification
     for disk in self.instance.disks:
@@ -5061,7 +5203,7 @@ class IAllocator(object):
 
     result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
 
-    if not isinstance(result, tuple) or len(result) != 4:
+    if not isinstance(result, (list, tuple)) or len(result) != 4:
       raise errors.OpExecError("Invalid result from master iallocator runner")
 
     rcode, stdout, stderr, fail = result
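
Accepting lists alongside tuples matters because RPC results pass through
serialization, and JSON-style encodings have no tuple type; a minimal
illustration (using simplejson is an assumption about the serializer in use):

    import simplejson
    # A tuple result comes back as a list after a round-trip.
    assert simplejson.loads(simplejson.dumps((0, "out", "err", False))) == \
           [0, "out", "err", False]
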