cmdlib: Add function to list all instances on node
[ganeti-local] / lib / cmdlib.py
index 23d4ae1..bd2f5a5 100644
@@ -711,30 +711,32 @@ def _CheckInstanceBridgesExist(lu, instance, node=None):
   _CheckNicsBridgesExist(lu, instance.nics, node)
 
 
-def _GetNodePrimaryInstances(cfg, node_name):
-  """Returns primary instances on a node.
+def _GetNodeInstancesInner(cfg, fn):
+  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
+
+
+def _GetNodeInstances(cfg, node_name):
+  """Returns a list of all primary and secondary instances on a node.
 
   """
-  instances = []
 
-  for (_, inst) in cfg.GetAllInstancesInfo().iteritems():
-    if node_name == inst.primary_node:
-      instances.append(inst)
+  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
+
+
+def _GetNodePrimaryInstances(cfg, node_name):
+  """Returns primary instances on a node.
 
-  return instances
+  """
+  return _GetNodeInstancesInner(cfg,
+                                lambda inst: node_name == inst.primary_node)
 
 
 def _GetNodeSecondaryInstances(cfg, node_name):
   """Returns secondary instances on a node.
 
   """
-  instances = []
-
-  for (_, inst) in cfg.GetAllInstancesInfo().iteritems():
-    if node_name in inst.secondary_nodes:
-      instances.append(inst)
-
-  return instances
+  return _GetNodeInstancesInner(cfg,
+                                lambda inst: node_name in inst.secondary_nodes)
 
 
 def _GetStorageTypeArgs(cfg, storage_type):
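Note: the refactoring above collapses two copies of the same loop into one filter-based helper. A self-contained sketch of how the predicates compose; FakeConfig and FakeInstance are hypothetical stand-ins for Ganeti's ConfigWriter and objects.Instance:

class FakeInstance(object):
  # Hypothetical stand-in for objects.Instance, for illustration only
  def __init__(self, name, primary_node, secondary_nodes):
    self.name = name
    self.primary_node = primary_node
    self.secondary_nodes = list(secondary_nodes)

  @property
  def all_nodes(self):
    # Mirrors the semantics of objects.Instance.all_nodes
    return [self.primary_node] + self.secondary_nodes


class FakeConfig(object):
  # Hypothetical stand-in for config.ConfigWriter
  def __init__(self, instances):
    self._instances = dict((inst.name, inst) for inst in instances)

  def GetAllInstancesInfo(self):
    return self._instances


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


cfg = FakeConfig([FakeInstance("web1", "node1", ["node2"]),
                  FakeInstance("db1", "node2", ["node3"])])

# Primary or secondary on node2: both instances match
both = _GetNodeInstancesInner(cfg, lambda inst: "node2" in inst.all_nodes)
assert sorted(i.name for i in both) == ["db1", "web1"]

# Primary on node2 only: just db1
prim = _GetNodeInstancesInner(cfg, lambda inst: "node2" == inst.primary_node)
assert [i.name for i in prim] == ["db1"]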
@@ -749,6 +751,52 @@ def _GetStorageTypeArgs(cfg, storage_type):
   return []
 
 
+def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
+  faulty = []
+
+  for dev in instance.disks:
+    cfg.SetDiskID(dev, node_name)
+
+  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
+  result.Raise("Failed to get disk status from node %s" % node_name,
+               prereq=prereq)
+
+  for idx, bdev_status in enumerate(result.payload):
+    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
+      faulty.append(idx)
+
+  return faulty
+
+
+class LUPostInitCluster(LogicalUnit):
+  """Logical unit for running hooks after cluster initialization.
+
+  """
+  HPATH = "cluster-init"
+  HTYPE = constants.HTYPE_CLUSTER
+  _OP_REQP = []
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    env = {"OP_TARGET": self.cfg.GetClusterName()}
+    mn = self.cfg.GetMasterNode()
+    return env, [], [mn]
+
+  def CheckPrereq(self):
+    """No prerequisites to check.
+
+    """
+    return True
+
+  def Exec(self, feedback_fn):
+    """Nothing to do.
+
+    """
+    return True
+
+
 class LUDestroyCluster(NoHooksLU):
   """Logical unit for destroying the cluster.
 
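Note: _FindFaultyInstanceDisks reduces the per-disk mirror statuses to a list of faulty indices. A minimal sketch of that selection step; the status class and the constant value are assumptions standing in for the RPC payload and constants.LDS_FAULTY:

# Assumed value for illustration; real code uses constants.LDS_FAULTY
LDS_FAULTY = 3

class FakeBlockdevStatus(object):
  def __init__(self, ldisk_status):
    self.ldisk_status = ldisk_status

def find_faulty(payload):
  # Same enumerate-and-filter shape as _FindFaultyInstanceDisks
  faulty = []
  for idx, bdev_status in enumerate(payload):
    if bdev_status and bdev_status.ldisk_status == LDS_FAULTY:
      faulty.append(idx)
  return faulty

# Disk 1 returned no status at all, disk 2 reports a faulty local disk
statuses = [FakeBlockdevStatus(1), None, FakeBlockdevStatus(LDS_FAULTY)]
assert find_faulty(statuses) == [2]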
@@ -1471,6 +1519,100 @@ class LUVerifyDisks(NoHooksLU):
     return result
 
 
+class LURepairDiskSizes(NoHooksLU):
+  """Verifies the cluster disks sizes.
+
+  """
+  _OP_REQP = ["instances"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    if not isinstance(self.op.instances, list):
+      raise errors.OpPrereqError("Invalid argument type 'instances'")
+
+    if self.op.instances:
+      self.wanted_names = []
+      for name in self.op.instances:
+        full_name = self.cfg.ExpandInstanceName(name)
+        if full_name is None:
+          raise errors.OpPrereqError("Instance '%s' not known" % name)
+        self.wanted_names.append(full_name)
+      self.needed_locks = {
+        locking.LEVEL_NODE: [],
+        locking.LEVEL_INSTANCE: self.wanted_names,
+        }
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+    else:
+      self.wanted_names = None
+      self.needed_locks = {
+        locking.LEVEL_NODE: locking.ALL_SET,
+        locking.LEVEL_INSTANCE: locking.ALL_SET,
+        }
+    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE and self.wanted_names is not None:
+      self._LockInstancesNodes(primary_only=True)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This only checks the optional instance list against the existing names.
+
+    """
+    if self.wanted_names is None:
+      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+
+    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
+                             in self.wanted_names]
+
+  def Exec(self, feedback_fn):
+    """Verify the size of cluster disks.
+
+    """
+    # TODO: check child disks too
+    # TODO: check differences in size between primary/secondary nodes
+    per_node_disks = {}
+    for instance in self.wanted_instances:
+      pnode = instance.primary_node
+      if pnode not in per_node_disks:
+        per_node_disks[pnode] = []
+      for idx, disk in enumerate(instance.disks):
+        per_node_disks[pnode].append((instance, idx, disk))
+
+    changed = []
+    for node, dskl in per_node_disks.items():
+      result = self.rpc.call_blockdev_getsizes(node, [v[2] for v in dskl])
+      if result.failed:
+        self.LogWarning("Failure in blockdev_getsizes call to node"
+                        " %s, ignoring", node)
+        continue
+      if len(result.data) != len(dskl):
+        self.LogWarning("Invalid result from node %s, ignoring node results",
+                        node)
+        continue
+      for ((instance, idx, disk), size) in zip(dskl, result.data):
+        if size is None:
+          self.LogWarning("Disk %d of instance %s did not return size"
+                          " information, ignoring", idx, instance.name)
+          continue
+        if not isinstance(size, (int, long)):
+          self.LogWarning("Disk %d of instance %s did not return valid"
+                          " size information, ignoring", idx, instance.name)
+          continue
+        size = size >> 20
+        if size != disk.size:
+          self.LogInfo("Disk %d of instance %s has mismatched size,"
+                       " correcting: recorded %d, actual %d", idx,
+                       instance.name, disk.size, size)
+          disk.size = size
+          self.cfg.Update(instance)
+          changed.append((instance.name, idx, size))
+    return changed
+
+
 class LURenameCluster(LogicalUnit):
   """Rename the cluster.
 
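Note: LURepairDiskSizes compares the configuration's record against what the node reports: the RPC returns sizes in bytes while the configuration stores mebibytes, hence the size >> 20 shift. A quick worked check of that conversion, with illustrative values:

recorded_mb = 10240                     # configuration entry: 10 GiB
reported_bytes = 10737418240            # blockdev_getsizes result: 10 GiB
assert (reported_bytes >> 20) == recorded_mb   # in sync, nothing to repair

shrunk_bytes = 10727080000              # hypothetical undersized volume
assert (shrunk_bytes >> 20) == 10230    # mismatch: disk.size would be
                                        # corrected to 10230 and recorded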
@@ -1833,18 +1975,18 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False):
         lu.LogWarning("Can't compute data for node %s/%s",
                            node, instance.disks[i].iv_name)
         continue
-      # we ignore the ldisk parameter
-      perc_done, est_time, is_degraded, _ = mstat
-      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
-      if perc_done is not None:
+
+      cumul_degraded = (cumul_degraded or
+                        (mstat.is_degraded and mstat.sync_percent is None))
+      if mstat.sync_percent is not None:
         done = False
-        if est_time is not None:
-          rem_time = "%d estimated seconds remaining" % est_time
-          max_time = est_time
+        if mstat.estimated_time is not None:
+          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
+          max_time = mstat.estimated_time
         else:
           rem_time = "no time estimate"
         lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
-                        (instance.disks[i].iv_name, perc_done, rem_time))
+                        (instance.disks[i].iv_name, mstat.sync_percent,
+                         rem_time))
 
     # if we're done but degraded, let's do a few small retries, to
     # make sure we see a stable and not transient situation; therefore
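Note: the _WaitForSync hunk replaces positional unpacking of the old 4-tuple with named attribute access. A sketch of the resulting reporting logic; FakeMirrorStatus is a hypothetical stand-in whose field names follow the diff:

class FakeMirrorStatus(object):
  def __init__(self, sync_percent, estimated_time, is_degraded):
    self.sync_percent = sync_percent
    self.estimated_time = estimated_time
    self.is_degraded = is_degraded

def sync_message(mstat):
  if mstat.sync_percent is None:
    return "no resync in progress"
  if mstat.estimated_time is not None:
    rem_time = "%d estimated seconds remaining" % mstat.estimated_time
  else:
    rem_time = "no time estimate"
  return "%5.2f%% done, %s" % (mstat.sync_percent, rem_time)

assert (sync_message(FakeMirrorStatus(42.5, 120, True)) ==
        "42.50% done, 120 estimated seconds remaining")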
@@ -1888,7 +2030,7 @@ def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
       result = False
     else:
       if ldisk:
-        result = result and not rstats.payload.ldisk_degraded
+        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
       else:
         result = result and not rstats.payload.is_degraded
 
@@ -2971,19 +3113,24 @@ class LUActivateInstanceDisks(NoHooksLU):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     _CheckNodeOnline(self, self.instance.primary_node)
+    if not hasattr(self.op, "ignore_size"):
+      self.op.ignore_size = False
 
   def Exec(self, feedback_fn):
     """Activate the disks.
 
     """
-    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
+    disks_ok, disks_info = \
+              _AssembleInstanceDisks(self, self.instance,
+                                     ignore_size=self.op.ignore_size)
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block devices")
 
     return disks_info
 
 
-def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
+def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
+                           ignore_size=False):
   """Prepare the block devices for an instance.
 
   This sets up the block devices on all nodes.
   """Prepare the block devices for an instance.
 
   This sets up the block devices on all nodes.
@@ -2995,6 +3142,10 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
   @type ignore_secondaries: boolean
   @param ignore_secondaries: if true, errors on secondary nodes
       won't result in an error return from the function
+  @type ignore_size: boolean
+  @param ignore_size: if true, the current known size of the disk
+      will not be used during the disk activation, useful for cases
+      when the size is wrong
   @return: False if the operation failed, otherwise a list of
       (host, instance_visible_name, node_visible_name)
       with the mapping from node devices to instance devices
@@ -3015,6 +3166,9 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
   # 1st pass, assemble on all nodes in secondary mode
   for inst_disk in instance.disks:
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
+      if ignore_size:
+        node_disk = node_disk.Copy()
+        node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
       msg = result.fail_msg
@@ -3032,6 +3186,9 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       if node != instance.primary_node:
         continue
+      if ignore_size:
+        node_disk = node_disk.Copy()
+        node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
       msg = result.fail_msg
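Note: on both assembly passes the ignore_size path copies the disk object before clearing its size, so the configuration's cached objects are never mutated. A sketch of that copy-then-unset pattern; FakeDisk is a hypothetical stand-in for objects.Disk, with UnsetSize modeled as recursing into child devices:

import copy

class FakeDisk(object):
  def __init__(self, size, children=None):
    self.size = size
    self.children = children or []

  def Copy(self):
    # Modeled with deepcopy; the real objects.Disk has its own Copy()
    return copy.deepcopy(self)

  def UnsetSize(self):
    # Clear the size recursively, including child devices
    self.size = 0
    for child in self.children:
      child.UnsetSize()

disk = FakeDisk(10240, children=[FakeDisk(10240)])
assembled = disk.Copy()
assembled.UnsetSize()
assert assembled.size == 0 and assembled.children[0].size == 0
assert disk.size == 10240  # the cached config object is left untouched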
@@ -3488,6 +3645,89 @@ class LUReinstallInstance(LogicalUnit):
       _ShutdownInstanceDisks(self, inst)
 
 
+class LURecreateInstanceDisks(LogicalUnit):
+  """Recreate an instance's missing disks.
+
+  """
+  HPATH = "instance-recreate-disks"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name", "disks"]
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    if not isinstance(self.op.disks, list):
+      raise errors.OpPrereqError("Invalid disks parameter")
+    for item in self.op.disks:
+      if (not isinstance(item, int) or
+          item < 0):
+        raise errors.OpPrereqError("Invalid disk specification '%s'" %
+                                   str(item))
+
+  def ExpandNames(self):
+    self._ExpandAndLockInstance()
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = _BuildInstanceHookEnvByObject(self, self.instance)
+    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster and is not running.
+
+    """
+    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    assert instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+    _CheckNodeOnline(self, instance.primary_node)
+
+    if instance.disk_template == constants.DT_DISKLESS:
+      raise errors.OpPrereqError("Instance '%s' has no disks" %
+                                 self.op.instance_name)
+    if instance.admin_up:
+      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
+                                 self.op.instance_name)
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
+    if remote_info.payload:
+      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
+                                 (self.op.instance_name,
+                                  instance.primary_node))
+
+    if not self.op.disks:
+      self.op.disks = range(len(instance.disks))
+    else:
+      for idx in self.op.disks:
+        if idx >= len(instance.disks):
+          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx)
+
+    self.instance = instance
+
+  def Exec(self, feedback_fn):
+    """Recreate the disks.
+
+    """
+    to_skip = []
+    for idx, disk in enumerate(self.instance.disks):
+      if idx not in self.op.disks: # disk idx has not been passed in
+        to_skip.append(idx)
+        continue
+
+    _CreateDisks(self, self.instance, to_skip=to_skip)
+
+
 class LURenameInstance(LogicalUnit):
   """Rename an instance.
 
@@ -4680,7 +4920,7 @@ def _GetInstanceInfoText(instance):
   return "originstname+%s" % instance.name
 
 
   return "originstname+%s" % instance.name
 
 
-def _CreateDisks(lu, instance):
+def _CreateDisks(lu, instance, to_skip=None):
   """Create all disks for an instance.
 
   This abstracts away some work from AddInstance.
   """Create all disks for an instance.
 
   This abstracts away some work from AddInstance.
@@ -4689,6 +4929,8 @@ def _CreateDisks(lu, instance):
   @param lu: the logical unit on whose behalf we execute
   @type instance: L{objects.Instance}
   @param instance: the instance whose disks we should create
+  @type to_skip: list
+  @param to_skip: list of indices to skip
   @rtype: boolean
   @return: the success of the creation
 
@@ -4705,7 +4947,9 @@ def _CreateDisks(lu, instance):
 
   # Note: this needs to be kept in sync with adding of disks in
   # LUSetInstanceParams
-  for device in instance.disks:
+  for idx, device in enumerate(instance.disks):
+    if to_skip and idx in to_skip:
+      continue
     logging.info("Creating volume %s for instance %s",
                  device.iv_name, instance.name)
     #HARDCODE
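Note: LURecreateInstanceDisks passes _CreateDisks the complement of the requested indices, and the loop above skips exactly those. Both steps as a standalone sketch over plain data, without the LU machinery:

def compute_to_skip(num_disks, wanted):
  # Complement of the requested set, as built in LURecreateInstanceDisks.Exec
  return [idx for idx in range(num_disks) if idx not in wanted]

def disks_to_create(disks, to_skip):
  # Same guard as the loop in _CreateDisks
  return [dev for idx, dev in enumerate(disks)
          if not (to_skip and idx in to_skip)]

disks = ["disk/0", "disk/1", "disk/2", "disk/3"]
to_skip = compute_to_skip(len(disks), [1, 3])
assert to_skip == [0, 2]
assert disks_to_create(disks, to_skip) == ["disk/1", "disk/3"]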
@@ -5642,20 +5886,21 @@ class TLReplaceDisks(Tasklet):
 
     """
     # check for valid parameter combination
-    cnt = [remote_node, iallocator].count(None)
     if mode == constants.REPLACE_DISK_CHG:
-      if cnt == 2:
+      if remote_node is None and iallocator is None:
         raise errors.OpPrereqError("When changing the secondary either an"
                                    " iallocator script must be used or the"
                                    " new node given")
-      elif cnt == 0:
+
+      if remote_node is not None and iallocator is not None:
         raise errors.OpPrereqError("Give either the iallocator or the new"
                                    " secondary, not both")
-    else: # not replacing the secondary
-      if cnt != 2:
-        raise errors.OpPrereqError("The iallocator and new node options can"
-                                   " be used only when changing the"
-                                   " secondary node")
+
+    elif remote_node is not None or iallocator is not None:
+      # Not replacing the secondary
+      raise errors.OpPrereqError("The iallocator and new node options can"
+                                 " only be used when changing the"
+                                 " secondary node")
 
   @staticmethod
   def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
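Note: the rewritten check spells out the two invalid combinations for REPLACE_DISK_CHG instead of counting None values. The same logic as a standalone function; the constant's string value and the use of ValueError are illustrative assumptions:

REPLACE_DISK_CHG = "replace_new_secondary"  # assumed value, see constants.py

def check_replace_args(mode, remote_node, iallocator):
  if mode == REPLACE_DISK_CHG:
    if remote_node is None and iallocator is None:
      raise ValueError("need an iallocator script or a new node")
    if remote_node is not None and iallocator is not None:
      raise ValueError("give either the iallocator or the new secondary")
  elif remote_node is not None or iallocator is not None:
    raise ValueError("only valid when changing the secondary node")

check_replace_args(REPLACE_DISK_CHG, "node3", None)   # accepted
check_replace_args("replace_on_primary", None, None)  # accepted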
@@ -5685,6 +5930,10 @@ class TLReplaceDisks(Tasklet):
 
     return remote_node_name
 
+  def _FindFaultyDisks(self, node_name):
+    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
+                                    node_name, True)
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -5727,35 +5976,64 @@ class TLReplaceDisks(Tasklet):
       raise errors.OpPrereqError("The specified node is already the"
                                  " secondary node of the instance.")
 
-    if self.mode == constants.REPLACE_DISK_PRI:
-      self.target_node = self.instance.primary_node
-      self.other_node = secondary_node
-      check_nodes = [self.target_node, self.other_node]
+    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
+                                    constants.REPLACE_DISK_CHG):
+      raise errors.OpPrereqError("Cannot specify disks to be replaced")
+
+    if self.mode == constants.REPLACE_DISK_AUTO:
+      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
+      faulty_secondary = self._FindFaultyDisks(secondary_node)
+
+      if faulty_primary and faulty_secondary:
+        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
+                                   " one node and can not be repaired"
+                                   " automatically" % self.instance_name)
+
+      if faulty_primary:
+        self.disks = faulty_primary
+        self.target_node = self.instance.primary_node
+        self.other_node = secondary_node
+        check_nodes = [self.target_node, self.other_node]
+      elif faulty_secondary:
+        self.disks = faulty_secondary
+        self.target_node = secondary_node
+        self.other_node = self.instance.primary_node
+        check_nodes = [self.target_node, self.other_node]
+      else:
+        self.disks = []
+        check_nodes = []
 
-    elif self.mode == constants.REPLACE_DISK_SEC:
-      self.target_node = secondary_node
-      self.other_node = self.instance.primary_node
-      check_nodes = [self.target_node, self.other_node]
+    else:
+      # Non-automatic modes
+      if self.mode == constants.REPLACE_DISK_PRI:
+        self.target_node = self.instance.primary_node
+        self.other_node = secondary_node
+        check_nodes = [self.target_node, self.other_node]
 
-    elif self.mode == constants.REPLACE_DISK_CHG:
-      self.new_node = remote_node
-      self.other_node = self.instance.primary_node
-      self.target_node = secondary_node
-      check_nodes = [self.new_node, self.other_node]
+      elif self.mode == constants.REPLACE_DISK_SEC:
+        self.target_node = secondary_node
+        self.other_node = self.instance.primary_node
+        check_nodes = [self.target_node, self.other_node]
 
-      _CheckNodeNotDrained(self.lu, remote_node)
+      elif self.mode == constants.REPLACE_DISK_CHG:
+        self.new_node = remote_node
+        self.other_node = self.instance.primary_node
+        self.target_node = secondary_node
+        check_nodes = [self.new_node, self.other_node]
 
-    else:
-      raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
-                                   self.mode)
+        _CheckNodeNotDrained(self.lu, remote_node)
+
+      else:
+        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
+                                     self.mode)
+
+      # If not specified all disks should be replaced
+      if not self.disks:
+        self.disks = range(len(self.instance.disks))
 
     for node in check_nodes:
       _CheckNodeOnline(self.lu, node)
 
-    # If not specified all disks should be replaced
-    if not self.disks:
-      self.disks = range(len(self.instance.disks))
-
     # Check whether disks are valid
     for disk_idx in self.disks:
       self.instance.FindDisk(disk_idx)
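Note: for REPLACE_DISK_AUTO, CheckPrereq picks the side holding the faulty disks, refuses to act when both sides are faulty, and turns into a no-op when neither is. That decision tree extracted into a sketch:

def pick_auto_repair(primary, secondary, faulty_primary, faulty_secondary):
  # Returns (disks, target_node, other_node); None nodes mean nothing to do
  if faulty_primary and faulty_secondary:
    raise ValueError("faulty disks on more than one node,"
                     " cannot repair automatically")
  if faulty_primary:
    return faulty_primary, primary, secondary
  if faulty_secondary:
    return faulty_secondary, secondary, primary
  return [], None, None

assert pick_auto_repair("n1", "n2", [0], []) == ([0], "n1", "n2")
assert pick_auto_repair("n1", "n2", [], [1]) == ([1], "n2", "n1")
assert pick_auto_repair("n1", "n2", [], []) == ([], None, None)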
@@ -5775,7 +6053,12 @@ class TLReplaceDisks(Tasklet):
     This dispatches the disk replacement to the appropriate handler.
 
     """
-    feedback_fn("Replacing disks for %s" % self.instance.name)
+    if not self.disks:
+      feedback_fn("No disks need replacement")
+      return
+
+    feedback_fn("Replacing disk(s) %s for %s" %
+                (", ".join([str(i) for i in self.disks]), self.instance.name))
 
     activate_disks = (not self.instance.admin_up)
 
@@ -5784,7 +6067,8 @@ class TLReplaceDisks(Tasklet):
       _StartInstanceDisks(self.lu, self.instance, True)
 
     try:
-      if self.mode == constants.REPLACE_DISK_CHG:
+      # Should we replace the secondary node?
+      if self.new_node is not None:
         return self._ExecDrbd8Secondary()
       else:
         return self._ExecDrbd8DiskOnly()
@@ -6293,22 +6577,33 @@ class LUQueryInstanceData(NoHooksLU):
                              in self.wanted_names]
     return
 
+  def _ComputeBlockdevStatus(self, node, instance_name, dev):
+    """Returns the status of a block device
+
+    """
+    if self.op.static:
+      return None
+
+    self.cfg.SetDiskID(dev, node)
+
+    result = self.rpc.call_blockdev_find(node, dev)
+    if result.offline:
+      return None
+
+    result.Raise("Can't compute disk status for %s" % instance_name)
+
+    status = result.payload
+    if status is None:
+      return None
+
+    return (status.dev_path, status.major, status.minor,
+            status.sync_percent, status.estimated_time,
+            status.is_degraded, status.ldisk_status)
+
   def _ComputeDiskStatus(self, instance, snode, dev):
     """Compute block device status.
 
     """
-    static = self.op.static
-    if not static:
-      self.cfg.SetDiskID(dev, instance.primary_node)
-      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      if dev_pstatus.offline:
-        dev_pstatus = None
-      else:
-        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
-        dev_pstatus = dev_pstatus.payload.ToLegacyStatus()
-    else:
-      dev_pstatus = None
-
     if dev.dev_type in constants.LDS_DRBD:
       # we change the snode then (otherwise we use the one passed in)
       if dev.logical_id[0] == instance.primary_node:
@@ -6316,16 +6611,9 @@ class LUQueryInstanceData(NoHooksLU):
       else:
         snode = dev.logical_id[0]
 
-    if snode and not static:
-      self.cfg.SetDiskID(dev, snode)
-      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
-      if dev_sstatus.offline:
-        dev_sstatus = None
-      else:
-        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
-        dev_sstatus = dev_sstatus.payload.ToLegacyStatus()
-    else:
-      dev_sstatus = None
+    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
+                                              instance.name, dev)
+    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
 
     if dev.children:
       dev_children = [self._ComputeDiskStatus(instance, snode, child)
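Note: _ComputeBlockdevStatus folds the duplicated primary/secondary branches into one helper returning the legacy seven-field tuple. The packing step in isolation; the status object is a hypothetical stand-in for the blockdev_find payload:

class FakeBlockDevStatus(object):
  def __init__(self, **kwargs):
    self.__dict__.update(kwargs)

def to_legacy_tuple(status):
  # Same field order as _ComputeBlockdevStatus above
  if status is None:
    return None
  return (status.dev_path, status.major, status.minor,
          status.sync_percent, status.estimated_time,
          status.is_degraded, status.ldisk_status)

st = FakeBlockDevStatus(dev_path="/dev/drbd0", major=147, minor=0,
                        sync_percent=None, estimated_time=None,
                        is_degraded=False, ldisk_status=1)
assert to_legacy_tuple(st)[:3] == ("/dev/drbd0", 147, 0)
assert to_legacy_tuple(None) is None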
@@ -7037,6 +7325,8 @@ class LUExportInstance(LogicalUnit):
     for disk in instance.disks:
       self.cfg.SetDiskID(disk, src_node)
 
+    # per-disk results
+    dresults = []
     try:
       for idx, disk in enumerate(instance.disks):
         # result.payload will be a snapshot of an lvm leaf of the one we passed
@@ -7072,16 +7362,23 @@ class LUExportInstance(LogicalUnit):
         if msg:
           self.LogWarning("Could not export disk/%s from node %s to"
                           " node %s: %s", idx, src_node, dst_node.name, msg)
+          dresults.append(False)
+        else:
+          dresults.append(True)
         msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
         if msg:
           self.LogWarning("Could not remove snapshot for disk/%d from node"
                           " %s: %s", idx, src_node, msg)
+      else:
+        dresults.append(False)
 
     result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
+    fin_resu = True
     msg = result.fail_msg
     if msg:
       self.LogWarning("Could not finalize export for instance %s"
                       " on node %s: %s", instance.name, dst_node.name, msg)
+      fin_resu = False
 
     nodelist = self.cfg.GetNodeList()
     nodelist.remove(dst_node.name)
@@ -7100,6 +7397,7 @@ class LUExportInstance(LogicalUnit):
           if msg:
             self.LogWarning("Could not remove older export for instance %s"
                             " on node %s: %s", iname, node, msg)
+    return fin_resu, dresults
 
 
 class LURemoveExport(NoHooksLU):
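Note: LUExportInstance now returns (fin_resu, dresults) instead of nothing, so callers can tell a partially failed export from a complete one. A sketch of how a caller might read that pair, with hypothetical values:

# Overall finalize flag plus one boolean per disk, as built in the hunk above
fin_resu, dresults = True, [True, False, True]

failed = [idx for idx, ok in enumerate(dresults) if not ok]
assert failed == [1]       # only disk/1 failed to export
assert fin_resu            # finalize_export itself succeeded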
@@ -7463,11 +7761,12 @@ class IAllocator(object):
         "master_candidate": ninfo.master_candidate,
         }
 
         "master_candidate": ninfo.master_candidate,
         }
 
-      if not ninfo.offline:
+      if not (ninfo.offline or ninfo.drained):
         nresult.Raise("Can't get data for node %s" % nname)
         node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                 nname)
         remote_info = nresult.payload
+
         for attr in ['memory_total', 'memory_free', 'memory_dom0',
                      'vg_size', 'vg_free', 'cpu_total']:
           if attr not in remote_info: