RAPI client: fix epydoc formatting
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 214e2bb..fa8716e 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -453,15 +453,19 @@ class _QueryBase:
   #: Attribute holding field definitions
   FIELDS = None
 
-  def __init__(self, names, fields, use_locking):
+  def __init__(self, filter_, fields, use_locking):
     """Initializes this class.
 
     """
     """Initializes this class.
 
     """
-    self.names = names
     self.use_locking = use_locking
 
-    self.query = query.Query(self.FIELDS, fields)
+    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
+                             namefield="name")
     self.requested_data = self.query.RequestedData()
+    self.names = self.query.RequestedNames()
+
+    # Sort only if no names were requested
+    self.sort_by_name = not self.names
 
     self.do_locking = None
     self.wanted = None
@@ -492,15 +496,6 @@ class _QueryBase:
     # Return expanded names
     return self.wanted
 
-  @classmethod
-  def FieldsQuery(cls, fields):
-    """Returns list of available fields.
-
-    @return: List of L{objects.QueryFieldDefinition}
-
-    """
-    return query.QueryFields(cls.FIELDS, fields)
-
   def ExpandNames(self, lu):
     """Expand names for this query.
 
@@ -529,13 +524,15 @@ class _QueryBase:
     """Collect data and execute query.
 
     """
     """Collect data and execute query.
 
     """
-    return query.GetQueryResponse(self.query, self._GetQueryData(lu))
+    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
+                                  sort_by_name=self.sort_by_name)
 
   def OldStyleQuery(self, lu):
     """Collect data and execute query.
 
     """
-    return self.query.OldStyleQuery(self._GetQueryData(lu))
+    return self.query.OldStyleQuery(self._GetQueryData(lu),
+                                    sort_by_name=self.sort_by_name)
 
 
 def _GetWantedNodes(lu, nodes):
@@ -609,6 +606,18 @@ def _GetUpdatedParams(old_params, update_dict,
   return params_copy
 
 
+def _RunPostHook(lu, node_name):
+  """Runs the post-hook for an opcode on a single node.
+
+  """
+  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
+  try:
+    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
+  except:
+    # pylint: disable-msg=W0702
+    lu.LogWarning("Errors occurred running hooks on %s" % node_name)
+
+
 def _CheckOutputFields(static, dynamic, selected):
   """Checks whether all selected fields are valid.
 
@@ -1035,7 +1044,7 @@ def _GetStorageTypeArgs(cfg, storage_type):
   # Special case for file storage
   if storage_type == constants.ST_FILE:
     # storage.FileStorage wants a list of storage directories
-    return [[cfg.GetFileStorageDir()]]
+    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
 
   return []
 
@@ -1153,12 +1162,7 @@ class LUClusterDestroy(LogicalUnit):
     master = self.cfg.GetMasterNode()
 
     # Run post hooks on master node before it's removed
-    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
-    try:
-      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
-    except:
-      # pylint: disable-msg=W0702
-      self.LogWarning("Errors occurred running hooks on %s" % master)
+    _RunPostHook(self, master)
 
     result = self.rpc.call_node_stop_master(master, False)
     result.Raise("Could not disable the master role")
@@ -1254,8 +1258,8 @@ class LUClusterVerify(LogicalUnit):
     @ivar instances: a list of running instances (runtime)
     @ivar pinst: list of configured primary instances (config)
     @ivar sinst: list of configured secondary instances (config)
-    @ivar sbp: diction of {secondary-node: list of instances} of all peers
-        of this node (config)
+    @ivar sbp: dictionary of {primary-node: list of instances} for all
+        instances for which this node is secondary (config)
     @ivar mfree: free memory, as reported by hypervisor (runtime)
     @ivar dfree: free disk, as reported by the node (runtime)
     @ivar offline: the offline status (config)
@@ -1554,7 +1558,7 @@ class LUClusterVerify(LogicalUnit):
                node_current)
 
     for node, n_img in node_image.items():
-      if (not node == node_current):
+      if node != node_current:
         test = instance in n_img.instances
         _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                  "instance should not run on node %s", node)
@@ -1564,7 +1568,11 @@ class LUClusterVerify(LogicalUnit):
                 for idx, (success, status) in enumerate(disks)]
 
     for nname, success, bdev_status, idx in diskdata:
-      _ErrorIf(instanceconfig.admin_up and not success,
+      # the 'ghost node' construction in Exec() ensures that we have a
+      # node here
+      snode = node_image[nname]
+      bad_snode = snode.ghost or snode.offline
+      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
                self.EINSTANCEFAULTYDISK, instance,
                "couldn't retrieve status for disk/%s on %s: %s",
                idx, nname, bdev_status)
@@ -1613,6 +1621,7 @@ class LUClusterVerify(LogicalUnit):
     instances it was primary for.
 
     """
+    cluster_info = self.cfg.GetClusterInfo()
     for node, n_img in node_image.items():
       # This code checks that every node which is now listed as
       # secondary has enough memory to host all instances it is
@@ -1622,10 +1631,16 @@ class LUClusterVerify(LogicalUnit):
       # WARNING: we currently take into account down instances as well
       # as up ones, considering that even if they're down someone
       # might want to start them even in the event of a node failure.
+      if n_img.offline:
+        # we're skipping offline nodes from the N+1 warning, since
+        # most likely we don't have good memory information from them;
+        # we already list instances living on such nodes, and that's
+        # enough warning
+        continue
       for prinode, instances in n_img.sbp.items():
         needed_mem = 0
         for instance in instances:
-          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
+          bep = cluster_info.FillBE(instance_cfg[instance])
           if bep[constants.BE_AUTO_BALANCE]:
             needed_mem += bep[constants.BE_MEMORY]
         test = n_img.mfree < needed_mem
@@ -2290,8 +2305,8 @@ class LUClusterVerify(LogicalUnit):
                self.ENODERPC, pnode, "instance %s, connection to"
                " primary node failed", instance)
 
-      if pnode_img.offline:
-        inst_nodes_offline.append(pnode)
+      _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
+               "instance lives on offline node %s", inst_config.primary_node)
 
       # If the instance is non-redundant we cannot survive losing its primary
       # node, so we are not N+1 compliant. On the other hand we have no disk
@@ -2306,7 +2321,7 @@ class LUClusterVerify(LogicalUnit):
                utils.CommaJoin(inst_config.secondary_nodes),
                code=self.ETYPE_WARNING)
 
-      if inst_config.disk_template in constants.DTS_NET_MIRROR:
+      if inst_config.disk_template in constants.DTS_INT_MIRROR:
         pnode = inst_config.primary_node
         instance_nodes = utils.NiceSort(inst_config.all_nodes)
         instance_groups = {}
@@ -2340,7 +2355,7 @@ class LUClusterVerify(LogicalUnit):
 
       # warn that the instance lives on offline nodes
       _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
-               "instance lives on offline node(s) %s",
+               "instance has offline secondary node(s) %s",
                utils.CommaJoin(inst_nodes_offline))
       # ... or ghost/non-vm_capable nodes
       for node in inst_config.all_nodes:
@@ -2574,16 +2589,18 @@ class LUClusterRepairDiskSizes(NoHooksLU):
       newl = [v[2].Copy() for v in dskl]
       for dsk in newl:
         self.cfg.SetDiskID(dsk, node)
-      result = self.rpc.call_blockdev_getsizes(node, newl)
+      result = self.rpc.call_blockdev_getsize(node, newl)
       if result.fail_msg:
-        self.LogWarning("Failure in blockdev_getsizes call to node"
+        self.LogWarning("Failure in blockdev_getsize call to node"
                         " %s, ignoring", node)
         continue
                         " %s, ignoring", node)
         continue
-      if len(result.data) != len(dskl):
+      if len(result.payload) != len(dskl):
+        logging.warning("Invalid result from node %s: len(dksl)=%d,"
+                        " result.payload=%s", node, len(dskl), result.payload)
         self.LogWarning("Invalid result from node %s, ignoring node results",
                         node)
         continue
         self.LogWarning("Invalid result from node %s, ignoring node results",
                         node)
         continue
-      for ((instance, idx, disk), size) in zip(dskl, result.data):
+      for ((instance, idx, disk), size) in zip(dskl, result.payload):
         if size is None:
           self.LogWarning("Disk %d of instance %s did not return size"
                           " information, ignoring", idx, instance.name)
@@ -3215,6 +3232,7 @@ class LUOobCommand(NoHooksLU):
 
   """
   REG_BGL = False
 
   """
   REG_BGL = False
+  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -3227,6 +3245,36 @@ class LUOobCommand(NoHooksLU):
 
     """
     self.nodes = []
 
     """
     self.nodes = []
+    self.master_node = self.cfg.GetMasterNode()
+
+    assert self.op.power_delay >= 0.0
+
+    if self.op.node_names:
+      if self.op.command in self._SKIP_MASTER:
+        if self.master_node in self.op.node_names:
+          master_node_obj = self.cfg.GetNodeInfo(self.master_node)
+          master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
+
+          if master_oob_handler:
+            additional_text = ("Run '%s %s %s' if you want to operate on the"
+                               " master regardless") % (master_oob_handler,
+                                                        self.op.command,
+                                                        self.master_node)
+          else:
+            additional_text = "The master node does not support out-of-band"
+
+          raise errors.OpPrereqError(("Operating on the master node %s is not"
+                                      " allowed for %s\n%s") %
+                                     (self.master_node, self.op.command,
+                                      additional_text), errors.ECODE_INVAL)
+    else:
+      self.op.node_names = self.cfg.GetNodeList()
+      if self.op.command in self._SKIP_MASTER:
+        self.op.node_names.remove(self.master_node)
+
+    if self.op.command in self._SKIP_MASTER:
+      assert self.master_node not in self.op.node_names
+
     for node_name in self.op.node_names:
       node = self.cfg.GetNodeInfo(node_name)
 
@@ -3236,7 +3284,8 @@ class LUOobCommand(NoHooksLU):
       else:
         self.nodes.append(node)
 
-      if (self.op.command == constants.OOB_POWER_OFF and not node.offline):
+      if (not self.op.ignore_status and
+          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
         raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                     " not marked offline") % node_name,
                                    errors.ECODE_STATE)
@@ -3248,21 +3297,22 @@ class LUOobCommand(NoHooksLU):
     if self.op.node_names:
       self.op.node_names = [_ExpandNodeName(self.cfg, name)
                             for name in self.op.node_names]
+      lock_names = self.op.node_names
     else:
-      self.op.node_names = self.cfg.GetNodeList()
+      lock_names = locking.ALL_SET
 
     self.needed_locks = {
-      locking.LEVEL_NODE: self.op.node_names,
+      locking.LEVEL_NODE: lock_names,
       }
 
   def Exec(self, feedback_fn):
     """Execute OOB and return result if we expect any.
 
     """
-    master_node = self.cfg.GetMasterNode()
+    master_node = self.master_node
     ret = []
 
-    for node in self.nodes:
+    for idx, node in enumerate(self.nodes):
       node_entry = [(constants.RS_NORMAL, node.name)]
       ret.append(node_entry)
 
@@ -3316,6 +3366,10 @@ class LUOobCommand(NoHooksLU):
 
           node_entry.append((constants.RS_NORMAL, result.payload))
 
+          if (self.op.command == constants.OOB_POWER_ON and
+              idx < len(self.nodes) - 1):
+            time.sleep(self.op.power_delay)
+
     return ret
 
   def _CheckPayload(self, result):
@@ -3354,37 +3408,28 @@ class LUOobCommand(NoHooksLU):
       raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                                utils.CommaJoin(errs))
 
+class _OsQuery(_QueryBase):
+  FIELDS = query.OS_FIELDS
 
 
-
-class LUOsDiagnose(NoHooksLU):
-  """Logical unit for OS diagnose/query.
-
-  """
-  REQ_BGL = False
-  _HID = "hidden"
-  _BLK = "blacklisted"
-  _VLD = "valid"
-  _FIELDS_STATIC = utils.FieldSet()
-  _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
-                                   "parameters", "api_versions", _HID, _BLK)
-
-  def CheckArguments(self):
-    if self.op.names:
-      raise errors.OpPrereqError("Selective OS query not supported",
-                                 errors.ECODE_INVAL)
-
-    _CheckOutputFields(static=self._FIELDS_STATIC,
-                       dynamic=self._FIELDS_DYNAMIC,
-                       selected=self.op.output_fields)
-
-  def ExpandNames(self):
-    # Lock all nodes, in shared mode
+  def ExpandNames(self, lu):
+    # Lock all nodes in shared mode
     # Temporary removal of locks, should be reverted later
     # TODO: reintroduce locks when they are lighter-weight
-    self.needed_locks = {}
+    lu.needed_locks = {}
     #self.share_locks[locking.LEVEL_NODE] = 1
     #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
+    # The following variables interact with _QueryBase._GetNames
+    if self.names:
+      self.wanted = self.names
+    else:
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = self.use_locking
+
+  def DeclareLocks(self, lu, level):
+    pass
+
   @staticmethod
   def _DiagnoseByOS(rlist):
     """Remaps a per-node return list into an a per-os per-node dictionary
@@ -3425,71 +3470,100 @@ class LUOsDiagnose(NoHooksLU):
                                         variants, params, api_versions))
     return all_os
 
-  def Exec(self, feedback_fn):
-    """Compute the list of OSes.
+  def _GetQueryData(self, lu):
+    """Computes the list of OSes and their attributes.
 
     """
 
     """
+    # Locking is not used
+    assert not (lu.acquired_locks or self.do_locking or self.use_locking)
+
     valid_nodes = [node.name
-                   for node in self.cfg.GetAllNodesInfo().values()
+                   for node in lu.cfg.GetAllNodesInfo().values()
                    if not node.offline and node.vm_capable]
-    node_data = self.rpc.call_os_diagnose(valid_nodes)
-    pol = self._DiagnoseByOS(node_data)
-    output = []
-    cluster = self.cfg.GetClusterInfo()
+    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
+    cluster = lu.cfg.GetClusterInfo()
+
+    data = {}
+
+    for (os_name, os_data) in pol.items():
+      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
+                          hidden=(os_name in cluster.hidden_os),
+                          blacklisted=(os_name in cluster.blacklisted_os))
+
+      variants = set()
+      parameters = set()
+      api_versions = set()
 
 
-    for os_name in utils.NiceSort(pol.keys()):
-      os_data = pol[os_name]
-      row = []
-      valid = True
-      (variants, params, api_versions) = null_state = (set(), set(), set())
       for idx, osl in enumerate(os_data.values()):
-        valid = bool(valid and osl and osl[0][1])
-        if not valid:
-          (variants, params, api_versions) = null_state
+        info.valid = bool(info.valid and osl and osl[0][1])
+        if not info.valid:
           break
-        node_variants, node_params, node_api = osl[0][3:6]
-        if idx == 0: # first entry
-          variants = set(node_variants)
-          params = set(node_params)
-          api_versions = set(node_api)
-        else: # keep consistency
+
+        (node_variants, node_params, node_api) = osl[0][3:6]
+        if idx == 0:
+          # First entry
+          variants.update(node_variants)
+          parameters.update(node_params)
+          api_versions.update(node_api)
+        else:
+          # Filter out inconsistent values
           variants.intersection_update(node_variants)
-          params.intersection_update(node_params)
+          parameters.intersection_update(node_params)
           api_versions.intersection_update(node_api)
 
-      is_hid = os_name in cluster.hidden_os
-      is_blk = os_name in cluster.blacklisted_os
-      if ((self._HID not in self.op.output_fields and is_hid) or
-          (self._BLK not in self.op.output_fields and is_blk) or
-          (self._VLD not in self.op.output_fields and not valid)):
-        continue
+      info.variants = list(variants)
+      info.parameters = list(parameters)
+      info.api_versions = list(api_versions)
 
 
-      for field in self.op.output_fields:
-        if field == "name":
-          val = os_name
-        elif field == self._VLD:
-          val = valid
-        elif field == "node_status":
-          # this is just a copy of the dict
-          val = {}
-          for node_name, nos_list in os_data.items():
-            val[node_name] = nos_list
-        elif field == "variants":
-          val = utils.NiceSort(list(variants))
-        elif field == "parameters":
-          val = list(params)
-        elif field == "api_versions":
-          val = list(api_versions)
-        elif field == self._HID:
-          val = is_hid
-        elif field == self._BLK:
-          val = is_blk
-        else:
-          raise errors.ParameterError(field)
-        row.append(val)
-      output.append(row)
+      data[os_name] = info
 
 
-    return output
+    # Prepare data in requested order
+    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+            if name in data]
+
+
+class LUOsDiagnose(NoHooksLU):
+  """Logical unit for OS diagnose/query.
+
+  """
+  REQ_BGL = False
+
+  @staticmethod
+  def _BuildFilter(fields, names):
+    """Builds a filter for querying OSes.
+
+    """
+    name_filter = qlang.MakeSimpleFilter("name", names)
+
+    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
+    # respective field is not requested
+    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
+                     for fname in ["hidden", "blacklisted"]
+                     if fname not in fields]
+    if "valid" not in fields:
+      status_filter.append([qlang.OP_TRUE, "valid"])
+
+    if status_filter:
+      status_filter.insert(0, qlang.OP_AND)
+    else:
+      status_filter = None
+
+    if name_filter and status_filter:
+      return [qlang.OP_AND, name_filter, status_filter]
+    elif name_filter:
+      return name_filter
+    else:
+      return status_filter
+
+  def CheckArguments(self):
+    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
+                       self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.oq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.oq.OldStyleQuery(self)
 
 
 class LUNodeRemove(LogicalUnit):
@@ -3565,12 +3639,7 @@ class LUNodeRemove(LogicalUnit):
     self.context.RemoveNode(node.name)
 
     # Run post hooks on the node before it's removed
-    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
-    try:
-      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
-    except:
-      # pylint: disable-msg=W0702
-      self.LogWarning("Errors occurred running hooks on %s" % node.name)
+    _RunPostHook(self, node.name)
 
     result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
     msg = result.fail_msg
@@ -3620,7 +3689,10 @@ class _NodeQuery(_QueryBase):
 
     # Gather data as requested
     if query.NQ_LIVE in self.requested_data:
-      node_data = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
+      # filter out non-vm_capable nodes
+      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
+
+      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
                                         lu.cfg.GetHypervisorType())
       live_data = dict((name, nresult.payload)
                        for (name, nresult) in node_data.items()
@@ -3669,8 +3741,8 @@ class LUNodeQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.nq = _NodeQuery(self.op.names, self.op.output_fields,
-                         self.op.use_locking)
+    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                         self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.nq.ExpandNames(self)
@@ -3868,18 +3940,21 @@ class _InstanceQuery(_QueryBase):
     """Computes the list of instances and their attributes.
 
     """
     """Computes the list of instances and their attributes.
 
     """
+    cluster = lu.cfg.GetClusterInfo()
     all_info = lu.cfg.GetAllInstancesInfo()
 
     instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
 
     instance_list = [all_info[name] for name in instance_names]
-    nodes = frozenset([inst.primary_node for inst in instance_list])
+    nodes = frozenset(itertools.chain(*(inst.all_nodes
+                                        for inst in instance_list)))
     hv_list = list(set([inst.hypervisor for inst in instance_list]))
     bad_nodes = []
     offline_nodes = []
+    wrongnode_inst = set()
 
     # Gather data as requested
-    if query.IQ_LIVE in self.requested_data:
+    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
       live_data = {}
       node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
       for name in nodes:
@@ -3891,7 +3966,11 @@ class _InstanceQuery(_QueryBase):
         if result.fail_msg:
           bad_nodes.append(name)
         elif result.payload:
-          live_data.update(result.payload)
+          for inst in result.payload:
+            if all_info[inst].primary_node == name:
+              live_data.update(result.payload)
+            else:
+              wrongnode_inst.add(inst)
         # else no instance is alive
     else:
       live_data = {}
@@ -3905,9 +3984,21 @@ class _InstanceQuery(_QueryBase):
     else:
       disk_usage = None
 
+    if query.IQ_CONSOLE in self.requested_data:
+      consinfo = {}
+      for inst in instance_list:
+        if inst.name in live_data:
+          # Instance is running
+          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
+        else:
+          consinfo[inst.name] = None
+      assert set(consinfo.keys()) == set(instance_names)
+    else:
+      consinfo = None
+
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_nodes, bad_nodes,
-                                   live_data)
+                                   live_data, wrongnode_inst, consinfo)
 
 
 class LUQuery(NoHooksLU):
@@ -3919,9 +4010,8 @@ class LUQuery(NoHooksLU):
 
   def CheckArguments(self):
     qcls = _GetQueryImplementation(self.op.what)
-    names = qlang.ReadSimpleFilter("name", self.op.filter)
 
 
-    self.impl = qcls(names, self.op.fields, False)
+    self.impl = qcls(self.op.filter, self.op.fields, False)
 
   def ExpandNames(self):
     self.impl.ExpandNames(self)
@@ -3947,7 +4037,7 @@ class LUQueryFields(NoHooksLU):
     self.needed_locks = {}
 
   def Exec(self, feedback_fn):
-    return self.qcls.FieldsQuery(self.op.fields)
+    return query.QueryFields(self.qcls.FIELDS, self.op.fields)
 
 
 class LUNodeModifyStorage(NoHooksLU):
@@ -4024,9 +4114,12 @@ class LUNodeAdd(LogicalUnit):
       "MASTER_CAPABLE": str(self.op.master_capable),
       "VM_CAPABLE": str(self.op.vm_capable),
       }
       "MASTER_CAPABLE": str(self.op.master_capable),
       "VM_CAPABLE": str(self.op.vm_capable),
       }
-    nodes_0 = self.cfg.GetNodeList()
-    nodes_1 = nodes_0 + [self.op.node_name, ]
-    return env, nodes_0, nodes_1
+
+    # Exclude added node
+    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
+    post_nodes = pre_nodes + [self.op.node_name, ]
+
+    return (env, pre_nodes, post_nodes)
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -4323,7 +4416,7 @@ class LUNodeSetParams(LogicalUnit):
       if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
         for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
           instance = self.context.cfg.GetInstanceInfo(instance_name)
-          i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
+          i_mirrored = instance.disk_template in constants.DTS_INT_MIRROR
           if i_mirrored and self.op.node_name in instance.all_nodes:
             instances_keep.append(instance_name)
             self.affected_instances.append(instance)
@@ -4630,6 +4723,7 @@ class LUClusterQuery(NoHooksLU):
       "volume_group_name": cluster.volume_group_name,
       "drbd_usermode_helper": cluster.drbd_usermode_helper,
       "file_storage_dir": cluster.file_storage_dir,
       "volume_group_name": cluster.volume_group_name,
       "drbd_usermode_helper": cluster.drbd_usermode_helper,
       "file_storage_dir": cluster.file_storage_dir,
+      "shared_file_storage_dir": cluster.shared_file_storage_dir,
       "maintain_node_health": cluster.maintain_node_health,
       "ctime": cluster.ctime,
       "mtime": cluster.mtime,
       "maintain_node_health": cluster.maintain_node_health,
       "ctime": cluster.ctime,
       "mtime": cluster.mtime,
@@ -4764,13 +4858,13 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
   # SyncSource, etc.)
 
   # 1st pass, assemble on all nodes in secondary mode
-  for inst_disk in disks:
+  for idx, inst_disk in enumerate(disks):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       if ignore_size:
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
+      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
       msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
@@ -4782,7 +4876,7 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
   # FIXME: race condition on drbd migration to primary
 
   # 2nd pass, do only the primary node
-  for inst_disk in disks:
+  for idx, inst_disk in enumerate(disks):
     dev_path = None
 
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
@@ -4792,7 +4886,7 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
+      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
       msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
@@ -5166,10 +5260,16 @@ class LUInstanceReboot(LogicalUnit):
     ignore_secondaries = self.op.ignore_secondaries
     reboot_type = self.op.reboot_type
 
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    remote_info.Raise("Error checking node %s" % instance.primary_node)
+    instance_running = bool(remote_info.payload)
+
     node_current = instance.primary_node
 
-    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
-                       constants.INSTANCE_REBOOT_HARD]:
+    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                                            constants.INSTANCE_REBOOT_HARD]:
       for disk in instance.disks:
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
@@ -5177,10 +5277,14 @@ class LUInstanceReboot(LogicalUnit):
                                              self.op.shutdown_timeout)
       result.Raise("Could not reboot instance")
     else:
-      result = self.rpc.call_instance_shutdown(node_current, instance,
-                                               self.op.shutdown_timeout)
-      result.Raise("Could not shutdown instance for full reboot")
-      _ShutdownInstanceDisks(self, instance)
+      if instance_running:
+        result = self.rpc.call_instance_shutdown(node_current, instance,
+                                                 self.op.shutdown_timeout)
+        result.Raise("Could not shutdown instance for full reboot")
+        _ShutdownInstanceDisks(self, instance)
+      else:
+        self.LogInfo("Instance %s was already stopped, starting now",
+                     instance.name)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current, instance, None, None)
       msg = result.fail_msg
@@ -5446,6 +5550,11 @@ class LUInstanceRename(LogicalUnit):
       hostname = netutils.GetHostname(name=new_name)
       self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                    hostname.name)
+      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
+        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
+                                    " same as given hostname '%s'") %
+                                    (hostname.name, self.op.new_name),
+                                    errors.ECODE_INVAL)
       new_name = self.op.new_name = hostname.name
       if (self.op.ip_check and
           netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
@@ -5466,7 +5575,7 @@ class LUInstanceRename(LogicalUnit):
     old_name = inst.name
 
     rename_file_storage = False
-    if (inst.disk_template == constants.DT_FILE and
+    if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
         self.op.new_name != inst.name):
       old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
       rename_file_storage = True
@@ -5596,8 +5705,8 @@ class LUInstanceQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
-                             self.op.use_locking)
+    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                             self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.iq.ExpandNames(self)
@@ -5617,14 +5726,34 @@ class LUInstanceFailover(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.iallocator = getattr(self.op, "iallocator", None)
+    self.target_node = getattr(self.op, "target_node", None)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
+
+    if self.op.target_node is not None:
+      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+
     self.needed_locks[locking.LEVEL_NODE] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
+      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
+      if instance.disk_template in constants.DTS_EXT_MIRROR:
+        if self.op.target_node is None:
+          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+        else:
+          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
+                                                   self.op.target_node]
+        del self.recalculate_locks[locking.LEVEL_NODE]
+      else:
+        self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -5634,15 +5763,19 @@ class LUInstanceFailover(LogicalUnit):
     """
     instance = self.instance
     source_node = instance.primary_node
     """
     instance = self.instance
     source_node = instance.primary_node
-    target_node = instance.secondary_nodes[0]
     env = {
       "IGNORE_CONSISTENCY": self.op.ignore_consistency,
       "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
       "OLD_PRIMARY": source_node,
-      "OLD_SECONDARY": target_node,
-      "NEW_PRIMARY": target_node,
-      "NEW_SECONDARY": source_node,
+      "NEW_PRIMARY": self.op.target_node,
       }
+
+    if instance.disk_template in constants.DTS_INT_MIRROR:
+      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
+      env["NEW_SECONDARY"] = source_node
+    else:
+      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
+
     env.update(_BuildInstanceHookEnvByObject(self, instance))
     nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
     nl_post = list(nl)
@@ -5660,19 +5793,47 @@ class LUInstanceFailover(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
-    if instance.disk_template not in constants.DTS_NET_MIRROR:
+    if instance.disk_template not in constants.DTS_MIRRORED:
       raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " network mirrored, cannot failover.",
+                                 " mirrored, cannot failover.",
                                  errors.ECODE_STATE)
 
-    secondary_nodes = instance.secondary_nodes
-    if not secondary_nodes:
-      raise errors.ProgrammerError("no secondary node but using "
-                                   "a mirrored disk template")
+    if instance.disk_template in constants.DTS_EXT_MIRROR:
+      _CheckIAllocatorOrNode(self, "iallocator", "target_node")
+      if self.op.iallocator:
+        self._RunAllocator()
+        # Release all unnecessary node locks
+        nodes_keep = [instance.primary_node, self.op.target_node]
+        nodes_rel = [node for node in self.acquired_locks[locking.LEVEL_NODE]
+                     if node not in nodes_keep]
+        self.context.glm.release(locking.LEVEL_NODE, nodes_rel)
+        self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
+
+      # self.op.target_node is already populated, either directly or by the
+      # iallocator run
+      target_node = self.op.target_node
 
 
-    target_node = secondary_nodes[0]
+    else:
+      secondary_nodes = instance.secondary_nodes
+      if not secondary_nodes:
+        raise errors.ConfigurationError("No secondary node but using"
+                                        " %s disk template" %
+                                        instance.disk_template)
+      target_node = secondary_nodes[0]
+
+      if self.op.iallocator or (self.op.target_node and
+                                self.op.target_node != target_node):
+        raise errors.OpPrereqError("Instances with disk template %s cannot"
+                                   " be failed over to arbitrary nodes"
+                                   " (neither an iallocator nor a target"
+                                   " node can be passed)" %
+                                   instance.disk_template, errors.ECODE_INVAL)
     _CheckNodeOnline(self, target_node)
     _CheckNodeNotDrained(self, target_node)
+
+    # Save target_node so that we can use it in BuildHooksEnv
+    self.op.target_node = target_node
+
     if instance.admin_up:
       # check memory requirements on the secondary node
       _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
@@ -5696,7 +5857,7 @@ class LUInstanceFailover(LogicalUnit):
     primary_node = self.cfg.GetNodeInfo(instance.primary_node)
 
     source_node = instance.primary_node
-    target_node = instance.secondary_nodes[0]
+    target_node = self.op.target_node
 
     if instance.admin_up:
       feedback_fn("* checking disk consistency between source and target")
@@ -5755,6 +5916,35 @@ class LUInstanceFailover(LogicalUnit):
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                  (instance.name, target_node, msg))
 
+  def _RunAllocator(self):
+    """Run the allocator based on input opcode.
+
+    """
+    ial = IAllocator(self.cfg, self.rpc,
+                     mode=constants.IALLOCATOR_MODE_RELOC,
+                     name=self.instance.name,
+                     # TODO See why hail breaks with a single node below
+                     relocate_from=[self.instance.primary_node,
+                                    self.instance.primary_node],
+                     )
+
+    ial.Run(self.op.iallocator)
+
+    if not ial.success:
+      raise errors.OpPrereqError("Can't compute nodes using"
+                                 " iallocator '%s': %s" %
+                                 (self.op.iallocator, ial.info),
+                                 errors.ECODE_NORES)
+    if len(ial.result) != ial.required_nodes:
+      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
+                                 " of nodes (%s), required %s" %
+                                 (self.op.iallocator, len(ial.result),
+                                  ial.required_nodes), errors.ECODE_FAULT)
+    self.op.target_node = ial.result[0]
+    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
+                 self.instance.name, self.op.iallocator,
+                 utils.CommaJoin(ial.result))
+
 
 class LUInstanceMigrate(LogicalUnit):
   """Migrate an instance.
@@ -5770,16 +5960,29 @@ class LUInstanceMigrate(LogicalUnit):
   def ExpandNames(self):
     self._ExpandAndLockInstance()
 
+    if self.op.target_node is not None:
+      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+
     self.needed_locks[locking.LEVEL_NODE] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
     self._migrater = TLMigrateInstance(self, self.op.instance_name,
-                                       self.op.cleanup)
+                                       self.op.cleanup, self.op.iallocator,
+                                       self.op.target_node)
     self.tasklets = [self._migrater]
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
+      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
+      if instance.disk_template in constants.DTS_EXT_MIRROR:
+        if self.op.target_node is None:
+          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+        else:
+          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
+                                                   self.op.target_node]
+        del self.recalculate_locks[locking.LEVEL_NODE]
+      else:
+        self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -5789,16 +5992,21 @@ class LUInstanceMigrate(LogicalUnit):
     """
     instance = self._migrater.instance
     source_node = instance.primary_node
     """
     instance = self._migrater.instance
     source_node = instance.primary_node
-    target_node = instance.secondary_nodes[0]
+    target_node = self._migrater.target_node
     env = _BuildInstanceHookEnvByObject(self, instance)
     env["MIGRATE_LIVE"] = self._migrater.live
     env["MIGRATE_CLEANUP"] = self.op.cleanup
     env.update({
         "OLD_PRIMARY": source_node,
-        "OLD_SECONDARY": target_node,
         "NEW_PRIMARY": target_node,
         "NEW_PRIMARY": target_node,
-        "NEW_SECONDARY": source_node,
         })
+
+    if instance.disk_template in constants.DTS_INT_MIRROR:
+      env["OLD_SECONDARY"] = target_node
+      env["NEW_SECONDARY"] = source_node
+    else:
+      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
+
     nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
     nl_post = list(nl)
     nl_post.append(source_node)
@@ -5930,7 +6138,7 @@ class LUInstanceMove(LogicalUnit):
     for idx, disk in enumerate(instance.disks):
       self.LogInfo("Copying data for disk %d", idx)
       result = self.rpc.call_blockdev_assemble(target_node, disk,
-                                               instance.name, True)
+                                               instance.name, True, idx)
       if result.fail_msg:
         self.LogWarning("Can't assemble newly created disk %d: %s",
                         idx, result.fail_msg)
@@ -5988,32 +6196,46 @@ class LUNodeMigrate(LogicalUnit):
   HTYPE = constants.HTYPE_NODE
   REQ_BGL = False
 
+  def CheckArguments(self):
+    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+
   def ExpandNames(self):
     self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
 
-    self.needed_locks = {
-      locking.LEVEL_NODE: [self.op.node_name],
-      }
-
-    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+    self.needed_locks = {}
 
     names = []
     tasklets = []
 
+    self.lock_all_nodes = False
+
     for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
       logging.debug("Migrating instance %s", inst.name)
       names.append(inst.name)
 
     for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
       logging.debug("Migrating instance %s", inst.name)
       names.append(inst.name)
 
-      tasklets.append(TLMigrateInstance(self, inst.name, False))
+      tasklets.append(TLMigrateInstance(self, inst.name, False,
+                                        self.op.iallocator, None))
+
+      if inst.disk_template in constants.DTS_EXT_MIRROR:
+        # We need to lock all nodes, as the iallocator will choose the
+        # destination nodes afterwards
+        self.lock_all_nodes = True
 
     self.tasklets = tasklets
 
 
     self.tasklets = tasklets
 
+    # Declare node locks
+    if self.lock_all_nodes:
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    else:
+      self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+
     # Declare instance locks
     self.needed_locks[locking.LEVEL_INSTANCE] = names
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE:
+    if level == locking.LEVEL_NODE and not self.lock_all_nodes:
       self._LockInstancesNodes()
 
   def BuildHooksEnv(self):
@@ -6039,7 +6261,8 @@ class TLMigrateInstance(Tasklet):
      this variable is initialized only after CheckPrereq has run
 
   """
-  def __init__(self, lu, instance_name, cleanup):
+  def __init__(self, lu, instance_name, cleanup,
+               iallocator=None, target_node=None):
     """Initializes this class.
 
     """
     """Initializes this class.
 
     """
@@ -6049,6 +6272,8 @@ class TLMigrateInstance(Tasklet):
     self.instance_name = instance_name
     self.cleanup = cleanup
     self.live = False # will be overridden later
+    self.iallocator = iallocator
+    self.target_node = target_node
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -6059,19 +6284,48 @@ class TLMigrateInstance(Tasklet):
     instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
     instance = self.cfg.GetInstanceInfo(instance_name)
     assert instance is not None
+    self.instance = instance
 
 
-    if instance.disk_template != constants.DT_DRBD8:
-      raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
+    if instance.disk_template not in constants.DTS_MIRRORED:
+      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
+                                 " migrations" % instance.disk_template,
+                                 errors.ECODE_STATE)
+
+    if instance.disk_template in constants.DTS_EXT_MIRROR:
+      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
+
+      if self.iallocator:
+        self._RunAllocator()
 
 
-    secondary_nodes = instance.secondary_nodes
-    if not secondary_nodes:
-      raise errors.ConfigurationError("No secondary node but using"
-                                      " drbd8 disk template")
+      # self.target_node is already populated, either directly or by the
+      # iallocator run
+      target_node = self.target_node
+
+      if len(self.lu.tasklets) == 1:
+        # It is safe to remove locks only when we're the only tasklet in the LU
+        nodes_keep = [instance.primary_node, self.target_node]
+        nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE]
+                     if node not in nodes_keep]
+        self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel)
+        self.lu.acquired_locks[locking.LEVEL_NODE] = nodes_keep
+
+    else:
+      secondary_nodes = instance.secondary_nodes
+      if not secondary_nodes:
+        raise errors.ConfigurationError("No secondary node but using"
+                                        " %s disk template" %
+                                        instance.disk_template)
+      target_node = secondary_nodes[0]
+      if self.lu.op.iallocator or (self.lu.op.target_node and
+                                   self.lu.op.target_node != target_node):
+        raise errors.OpPrereqError("Instances with disk template %s cannot"
+                                   " be migrated over to arbitrary nodes"
+                                   " (neither an iallocator nor a target"
+                                   " node can be passed)" %
+                                   instance.disk_template, errors.ECODE_INVAL)
 
     i_be = self.cfg.GetClusterInfo().FillBE(instance)
 
-    target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
     _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                          instance.name, i_be[constants.BE_MEMORY],
@@ -6087,7 +6341,35 @@ class TLMigrateInstance(Tasklet):
       result.Raise("Can't migrate, please use failover",
                    prereq=True, ecode=errors.ECODE_STATE)
 
-    self.instance = instance
+
+  def _RunAllocator(self):
+    """Run the allocator based on input opcode.
+
+    """
+    ial = IAllocator(self.cfg, self.rpc,
+                     mode=constants.IALLOCATOR_MODE_RELOC,
+                     name=self.instance_name,
+                     # TODO See why hail breaks with a single node below
+                     relocate_from=[self.instance.primary_node,
+                                    self.instance.primary_node],
+                     )
+
+    ial.Run(self.iallocator)
+
+    if not ial.success:
+      raise errors.OpPrereqError("Can't compute nodes using"
+                                 " iallocator '%s': %s" %
+                                 (self.iallocator, ial.info),
+                                 errors.ECODE_NORES)
+    if len(ial.result) != ial.required_nodes:
+      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
+                                 " of nodes (%s), required %s" %
+                                 (self.iallocator, len(ial.result),
+                                  ial.required_nodes), errors.ECODE_FAULT)
+    self.target_node = ial.result[0]
+    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
+                    self.instance_name, self.iallocator,
+                    utils.CommaJoin(ial.result))
 
     if self.lu.op.live is not None and self.lu.op.mode is not None:
       raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
@@ -6103,7 +6385,7 @@ class TLMigrateInstance(Tasklet):
       self.lu.op.live = None
     elif self.lu.op.mode is None:
       # read the default value from the hypervisor
-      i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
+      i_hv = self.cfg.GetClusterInfo().FillHV(self.instance, skip_globals=False)
       self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
 
     self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
@@ -6223,16 +6505,17 @@ class TLMigrateInstance(Tasklet):
                        " primary node (%s)" % source_node)
       demoted_node = target_node
 
-    self._EnsureSecondary(demoted_node)
-    try:
+    if instance.disk_template in constants.DTS_INT_MIRROR:
+      self._EnsureSecondary(demoted_node)
+      try:
+        self._WaitUntilSync()
+      except errors.OpExecError:
+        # we ignore here errors, since if the device is standalone, it
+        # won't be able to sync
+        pass
+      self._GoStandalone()
+      self._GoReconnect(False)
       self._WaitUntilSync()
-    except errors.OpExecError:
-      # we ignore here errors, since if the device is standalone, it
-      # won't be able to sync
-      pass
-    self._GoStandalone()
-    self._GoReconnect(False)
-    self._WaitUntilSync()
 
     self.feedback_fn("* done")
 
@@ -6241,6 +6524,9 @@ class TLMigrateInstance(Tasklet):
 
     """
     target_node = self.target_node
+    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
+      return
+
     try:
       self._EnsureSecondary(target_node)
       self._GoStandalone()
@@ -6305,11 +6591,12 @@ class TLMigrateInstance(Tasklet):
 
     self.migration_info = migration_info = result.payload
 
-    # Then switch the disks to master/master mode
-    self._EnsureSecondary(target_node)
-    self._GoStandalone()
-    self._GoReconnect(True)
-    self._WaitUntilSync()
+    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
+      # Then switch the disks to master/master mode
+      self._EnsureSecondary(target_node)
+      self._GoStandalone()
+      self._GoReconnect(True)
+      self._WaitUntilSync()
 
     self.feedback_fn("* preparing %s to accept the instance" % target_node)
     result = self.rpc.call_accept_instance(target_node,
@@ -6358,11 +6645,12 @@ class TLMigrateInstance(Tasklet):
       raise errors.OpExecError("Could not finalize instance migration: %s" %
                                msg)
 
-    self._EnsureSecondary(source_node)
-    self._WaitUntilSync()
-    self._GoStandalone()
-    self._GoReconnect(False)
-    self._WaitUntilSync()
+    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
+      self._EnsureSecondary(source_node)
+      self._WaitUntilSync()
+      self._GoStandalone()
+      self._GoReconnect(False)
+      self._WaitUntilSync()
 
     self.feedback_fn("* done")
 
@@ -6375,7 +6663,13 @@ class TLMigrateInstance(Tasklet):
     self.feedback_fn = feedback_fn
 
     self.source_node = self.instance.primary_node
-    self.target_node = self.instance.secondary_nodes[0]
+
+    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
+    if self.instance.disk_template in constants.DTS_INT_MIRROR:
+      self.target_node = self.instance.secondary_nodes[0]
+      # Otherwise self.target_node has been populated either
+      # directly, or through an iallocator.
+
     self.all_nodes = [self.source_node, self.target_node]
     self.nodes_ip = {
       self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
@@ -6559,6 +6853,34 @@ def _GenerateDiskTemplate(lu, template_name,
                                                          disk_index)),
                               mode=disk["mode"])
       disks.append(disk_dev)
+  elif template_name == constants.DT_SHARED_FILE:
+    if len(secondary_nodes) != 0:
+      raise errors.ProgrammerError("Wrong template configuration")
+
+    opcodes.RequireSharedFileStorage()
+
+    for idx, disk in enumerate(disk_info):
+      disk_index = idx + base_index
+      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
+                              iv_name="disk/%d" % disk_index,
+                              logical_id=(file_driver,
+                                          "%s/disk%d" % (file_storage_dir,
+                                                         disk_index)),
+                              mode=disk["mode"])
+      disks.append(disk_dev)
+  elif template_name == constants.DT_BLOCK:
+    if len(secondary_nodes) != 0:
+      raise errors.ProgrammerError("Wrong template configuration")
+
+    for idx, disk in enumerate(disk_info):
+      disk_index = idx + base_index
+      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
+                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
+                                          disk["adopt"]),
+                              iv_name="disk/%d" % disk_index,
+                              mode=disk["mode"])
+      disks.append(disk_dev)
+
   else:
     raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
   return disks
@@ -6595,6 +6917,10 @@ def _WipeDisks(lu, instance):
 
   """
   node = instance.primary_node
+
+  for device in instance.disks:
+    lu.cfg.SetDiskID(device, node)
+
   logging.info("Pause sync of instance %s disks", instance.name)
   result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
 
@@ -6606,7 +6932,8 @@ def _WipeDisks(lu, instance):
   try:
     for idx, device in enumerate(instance.disks):
       lu.LogInfo("* Wiping disk %d", idx)
-      logging.info("Wiping disk %d for instance %s", idx, instance.name)
+      logging.info("Wiping disk %d for instance %s, node %s",
+                   idx, instance.name, node)
 
       # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
       # MAX_WIPE_CHUNK at max
@@ -6668,7 +6995,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     pnode = target_node
     all_nodes = [pnode]
 
-  if instance.disk_template == constants.DT_FILE:
+  if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
 
@@ -6758,6 +7085,7 @@ def _ComputeDiskSizePerVG(disk_template, disks):
     # 128 MB are added for drbd metadata for each disk
     constants.DT_DRBD8: _compute(disks, 128),
     constants.DT_FILE: {},
+    constants.DT_SHARED_FILE: {},
   }
 
   if disk_template not in req_size_dict:
@@ -6778,6 +7106,8 @@ def _ComputeDiskSize(disk_template, disks):
     # 128 MB are added for drbd metadata for each disk
     constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
     constants.DT_FILE: None,
+    constants.DT_SHARED_FILE: 0,
+    constants.DT_BLOCK: 0,
   }
 
   if disk_template not in req_size_dict:
@@ -6787,6 +7117,21 @@ def _ComputeDiskSize(disk_template, disks):
   return req_size_dict[disk_template]
 
 
+def _FilterVmNodes(lu, nodenames):
+  """Filters out non-vm_capable nodes from a list.
+
+  @type lu: L{LogicalUnit}
+  @param lu: the logical unit for which we check
+  @type nodenames: list
+  @param nodenames: the list of nodes on which we should check
+  @rtype: list
+  @return: the list of vm-capable nodes
+
+  """
+  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
+  return [name for name in nodenames if name not in vm_nodes]
+
+
 def _CheckHVParams(lu, nodenames, hvname, hvparams):
   """Hypervisor parameter validation.
 
@@ -6804,6 +7149,7 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
   @raise errors.OpPrereqError: if the parameters are not valid
 
   """
+  nodenames = _FilterVmNodes(lu, nodenames)
   hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                   hvname,
                                                   hvparams)
@@ -6831,6 +7177,7 @@ def _CheckOSParams(lu, required, nodenames, osname, osparams):
   @raise errors.OpPrereqError: if the parameters are not valid
 
   """
+  nodenames = _FilterVmNodes(lu, nodenames)
   result = lu.rpc.call_os_validate(required, nodenames, osname,
                                    [constants.OS_VALIDATE_PARAMETERS],
                                    osparams)
@@ -6896,6 +7243,12 @@ class LUInstanceCreate(LogicalUnit):
       if self.op.mode == constants.INSTANCE_IMPORT:
         raise errors.OpPrereqError("Disk adoption not allowed for"
                                    " instance import", errors.ECODE_INVAL)
+    else:
+      if self.op.disk_template in constants.DTS_MUST_ADOPT:
+        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
+                                   " but no 'adopt' parameter given" %
+                                   self.op.disk_template,
+                                   errors.ECODE_INVAL)
 
     self.adopt_disks = has_adopt
 
@@ -6922,7 +7275,7 @@ class LUInstanceCreate(LogicalUnit):
     _CheckIAllocatorOrNode(self, "iallocator", "pnode")
 
     if self.op.pnode is not None:
-      if self.op.disk_template in constants.DTS_NET_MIRROR:
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
         if self.op.snode is None:
           raise errors.OpPrereqError("The networked disk templates need"
                                      " a mirror node", errors.ECODE_INVAL)
@@ -7357,18 +7710,8 @@ class LUInstanceCreate(LogicalUnit):
                                      " in cluster" % mac,
                                      errors.ECODE_NOTUNIQUE)
 
-      # bridge verification
-      bridge = nic.get("bridge", None)
-      link = nic.get("link", None)
-      if bridge and link:
-        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
-                                   " at the same time", errors.ECODE_INVAL)
-      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
-        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
-                                   errors.ECODE_INVAL)
-      elif bridge:
-        link = bridge
-
+      #  Build nic parameters
+      link = nic.get(constants.INIC_LINK, None)
       nicparams = {}
       if nic_mode_req:
         nicparams[constants.NIC_MODE] = nic_mode_req
@@ -7482,7 +7825,7 @@ class LUInstanceCreate(LogicalUnit):
     self.secondaries = []
 
     # mirror node verification
-    if self.op.disk_template in constants.DTS_NET_MIRROR:
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
       if self.op.snode == pnode.name:
         raise errors.OpPrereqError("The secondary node cannot be the"
                                    " primary node.", errors.ECODE_INVAL)
@@ -7498,7 +7841,7 @@ class LUInstanceCreate(LogicalUnit):
       req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
       _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
 
-    else: # instead, we must check the adoption data
+    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
       all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
       if len(all_lvs) != len(self.disks):
         raise errors.OpPrereqError("Duplicate volume names given for adoption",
@@ -7534,6 +7877,34 @@ class LUInstanceCreate(LogicalUnit):
       for dsk in self.disks:
         dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
 
+    elif self.op.disk_template == constants.DT_BLOCK:
+      # Normalize and de-duplicate device paths
+      all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks])
+      if len(all_disks) != len(self.disks):
+        raise errors.OpPrereqError("Duplicate disk names given for adoption",
+                                   errors.ECODE_INVAL)
+      baddisks = [d for d in all_disks
+                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
+      if baddisks:
+        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
+                                   " cannot be adopted" %
+                                   (", ".join(baddisks),
+                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
+                                   errors.ECODE_INVAL)
+
+      node_disks = self.rpc.call_bdev_sizes([pnode.name],
+                                            list(all_disks))[pnode.name]
+      node_disks.Raise("Cannot get block device information from node %s" %
+                       pnode.name)
+      node_disks = node_disks.payload
+      delta = all_disks.difference(node_disks.keys())
+      if delta:
+        raise errors.OpPrereqError("Missing block device(s): %s" %
+                                   utils.CommaJoin(delta),
+                                   errors.ECODE_INVAL)
+      for dsk in self.disks:
+        dsk["size"] = int(float(node_disks[dsk["adopt"]]))
+
     _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
     _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
@@ -7564,7 +7935,7 @@ class LUInstanceCreate(LogicalUnit):
     else:
       network_port = None
 
-    if constants.ENABLE_FILE_STORAGE:
+    if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
       # this is needed because os.path.join does not accept None arguments
       if self.op.file_storage_dir is None:
         string_file_storage_dir = ""
@@ -7572,7 +7943,12 @@ class LUInstanceCreate(LogicalUnit):
         string_file_storage_dir = self.op.file_storage_dir
 
       # build the full file storage dir path
-      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
+      if self.op.disk_template == constants.DT_SHARED_FILE:
+        get_fsd_fn = self.cfg.GetSharedFileStorageDir
+      else:
+        get_fsd_fn = self.cfg.GetFileStorageDir
+
+      file_storage_dir = utils.PathJoin(get_fsd_fn(),
                                         string_file_storage_dir, instance)
     else:
       file_storage_dir = ""
@@ -7600,17 +7976,18 @@ class LUInstanceCreate(LogicalUnit):
                             )
 
     if self.adopt_disks:
-      # rename LVs to the newly-generated names; we need to construct
-      # 'fake' LV disks with the old data, plus the new unique_id
-      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
-      rename_to = []
-      for t_dsk, a_dsk in zip (tmp_disks, self.disks):
-        rename_to.append(t_dsk.logical_id)
-        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
-        self.cfg.SetDiskID(t_dsk, pnode_name)
-      result = self.rpc.call_blockdev_rename(pnode_name,
-                                             zip(tmp_disks, rename_to))
-      result.Raise("Failed to rename adoped LVs")
+      if self.op.disk_template == constants.DT_PLAIN:
+        # rename LVs to the newly-generated names; we need to construct
+        # 'fake' LV disks with the old data, plus the new unique_id
+        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
+        rename_to = []
+        for t_dsk, a_dsk in zip (tmp_disks, self.disks):
+          rename_to.append(t_dsk.logical_id)
+          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
+          self.cfg.SetDiskID(t_dsk, pnode_name)
+        result = self.rpc.call_blockdev_rename(pnode_name,
+                                               zip(tmp_disks, rename_to))
+        result.Raise("Failed to rename adopted LVs")
     else:
       feedback_fn("* creating instance disks...")
       try:
@@ -7655,7 +8032,7 @@ class LUInstanceCreate(LogicalUnit):
 
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self, iobj)
-    elif iobj.disk_template in constants.DTS_NET_MIRROR:
+    elif iobj.disk_template in constants.DTS_INT_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
@@ -7790,26 +8167,36 @@ class LUInstanceConsole(NoHooksLU):
 
     if instance.name not in node_insts.payload:
       if instance.admin_up:
-        state = "ERROR_down"
+        state = constants.INSTST_ERRORDOWN
       else:
-        state = "ADMIN_down"
+        state = constants.INSTST_ADMINDOWN
       raise errors.OpExecError("Instance %s is not running (state %s)" %
                                (instance.name, state))
 
     logging.debug("Connecting to console of %s on %s", instance.name, node)
 
-    hyper = hypervisor.GetHypervisor(instance.hypervisor)
-    cluster = self.cfg.GetClusterInfo()
-    # beparams and hvparams are passed separately, to avoid editing the
-    # instance and then saving the defaults in the instance itself.
-    hvparams = cluster.FillHV(instance)
-    beparams = cluster.FillBE(instance)
-    console = hyper.GetInstanceConsole(instance, hvparams, beparams)
+    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
 
 
-    assert console.instance == instance.name
-    assert console.Validate()
 
 
-    return console.ToDict()
+def _GetInstanceConsole(cluster, instance):
+  """Returns console information for an instance.
+
+  @type cluster: L{objects.Cluster}
+  @type instance: L{objects.Instance}
+  @rtype: dict
+
+  """
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  # beparams and hvparams are passed separately, to avoid editing the
+  # instance and then saving the defaults in the instance itself.
+  hvparams = cluster.FillHV(instance)
+  beparams = cluster.FillBE(instance)
+  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
+
+  assert console.instance == instance.name
+  assert console.Validate()
+
+  return console.ToDict()
 
 
 class LUInstanceReplaceDisks(LogicalUnit):
@@ -7967,6 +8354,30 @@ class TLReplaceDisks(Tasklet):
     return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                     node_name, True)
 
+  def _CheckDisksActivated(self, instance):
+    """Checks if the instance disks are activated.
+
+    @param instance: The instance to check disks
+    @return: True if they are activated, False otherwise
+
+    """
+    nodes = instance.all_nodes
+
+    for idx, dev in enumerate(instance.disks):
+      for node in nodes:
+        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
+        self.cfg.SetDiskID(dev, node)
+
+        result = self.rpc.call_blockdev_find(node, dev)
+
+        if result.offline:
+          continue
+        elif result.fail_msg or not result.payload:
+          return False
+
+    return True
+
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -8030,6 +8441,10 @@ class TLReplaceDisks(Tasklet):
                                  errors.ECODE_INVAL)
 
     if self.mode == constants.REPLACE_DISK_AUTO:
+      if not self._CheckDisksActivated(instance):
+        raise errors.OpPrereqError("Please run activate-disks on instance %s"
+                                   " first" % self.instance_name,
+                                   errors.ECODE_STATE)
       faulty_primary = self._FindFaultyDisks(instance.primary_node)
       faulty_secondary = self._FindFaultyDisks(secondary_node)
 
@@ -8708,9 +9123,10 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     self.disk = instance.FindDisk(self.op.disk)
 
-    if instance.disk_template != constants.DT_FILE:
-      # TODO: check the free disk space for file, when that feature
-      # will be supported
+    if instance.disk_template not in (constants.DT_FILE,
+                                      constants.DT_SHARED_FILE):
+      # TODO: check the free disk space for file, when that feature will be
+      # supported
       _CheckNodesFreeDiskPerVG(self, nodenames,
                                self.disk.ComputeGrowth(self.op.amount))
 
@@ -8969,7 +9385,7 @@ class LUInstanceSetParams(LogicalUnit):
                                  errors.ECODE_INVAL)
 
     if (self.op.disk_template and
-        self.op.disk_template in constants.DTS_NET_MIRROR and
+        self.op.disk_template in constants.DTS_INT_MIRROR and
         self.op.remote_node is None):
       raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                  " one requires specifying a secondary node",
@@ -9129,7 +9545,7 @@ class LUInstanceSetParams(LogicalUnit):
                                                   self.op.disk_template),
                                    errors.ECODE_INVAL)
       _CheckInstanceDown(self, instance, "cannot change disk template")
-      if self.op.disk_template in constants.DTS_NET_MIRROR:
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
         if self.op.remote_node == pnode:
           raise errors.OpPrereqError("Given new secondary node %s is the same"
                                      " as the primary node of the instance" %
@@ -9451,7 +9867,8 @@ class LUInstanceSetParams(LogicalUnit):
         result.append(("disk/%d" % device_idx, "remove"))
       elif disk_op == constants.DDM_ADD:
         # add a new disk
-        if instance.disk_template == constants.DT_FILE:
+        if instance.disk_template in (constants.DT_FILE,
+                                        constants.DT_SHARED_FILE):
           file_driver, file_path = instance.disks[0].logical_id
           file_path = os.path.dirname(file_path)
         else:
@@ -10117,7 +10534,7 @@ class LUGroupAssignNodes(NoHooksLU):
     In particular, it returns information about newly split instances, and
     instances that were already split, and remain so after the change.
 
-    Only instances whose disk template is listed in constants.DTS_NET_MIRROR are
+    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
     considered.
 
     @type changes: list of (node_name, new_group_uuid) pairs.
@@ -10140,7 +10557,7 @@ class LUGroupAssignNodes(NoHooksLU):
       return [instance.primary_node] + list(instance.secondary_nodes)
 
     for inst in instance_data.values():
-      if inst.disk_template not in constants.DTS_NET_MIRROR:
+      if inst.disk_template not in constants.DTS_INT_MIRROR:
         continue
 
       instance_nodes = InstanceNodes(inst)
@@ -10157,7 +10574,6 @@ class LUGroupAssignNodes(NoHooksLU):
 
 
 class _GroupQuery(_QueryBase):
-
   FIELDS = query.GROUP_FIELDS
 
   def ExpandNames(self, lu):
@@ -10240,7 +10656,8 @@ class LUGroupQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
+    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                          self.op.output_fields, False)
 
   def ExpandNames(self):
     self.gq.ExpandNames(self)
@@ -10353,9 +10770,9 @@ class LUGroupRemove(LogicalUnit):
 
     # Verify the cluster would not be left group-less.
     if len(self.cfg.GetNodeGroupList()) == 1:
-      raise errors.OpPrereqError("Group '%s' is the last group in the cluster,"
-                                 " which cannot be left without at least one"
-                                 " group" % self.op.group_name,
+      raise errors.OpPrereqError("Group '%s' is the only group,"
+                                 " cannot be removed" %
+                                 self.op.group_name,
                                  errors.ECODE_STATE)
 
   def BuildHooksEnv(self):
@@ -10388,7 +10805,7 @@ class LUGroupRename(LogicalUnit):
 
   def ExpandNames(self):
     # This raises errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)
+    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: [self.group_uuid],
@@ -10397,8 +10814,7 @@ class LUGroupRename(LogicalUnit):
   def CheckPrereq(self):
     """Check prerequisites.
 
-    This checks that the given old_name exists as a node group, and that
-    new_name doesn't.
+    Ensures requested new name is not yet used.
 
     """
     try:
@@ -10416,7 +10832,7 @@ class LUGroupRename(LogicalUnit):
 
     """
     env = {
-      "OLD_NAME": self.op.old_name,
+      "OLD_NAME": self.op.group_name,
       "NEW_NAME": self.op.new_name,
       }
 
@@ -10439,7 +10855,7 @@ class LUGroupRename(LogicalUnit):
 
     if group is None:
       raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
-                               (self.op.old_name, self.group_uuid))
+                               (self.op.group_name, self.group_uuid))
 
     group.name = self.op.new_name
     self.cfg.Update(group, feedback_fn)
@@ -10996,8 +11412,7 @@ class IAllocator(object):
           "i_pri_up_memory": i_p_up_mem,
           }
         pnr_dyn.update(node_results[nname])
-
-      node_results[nname] = pnr_dyn
+        node_results[nname] = pnr_dyn
 
     return node_results
 
@@ -11049,7 +11464,7 @@ class IAllocator(object):
     """
     disk_space = _ComputeDiskSize(self.disk_template, self.disks)
 
-    if self.disk_template in constants.DTS_NET_MIRROR:
+    if self.disk_template in constants.DTS_INT_MIRROR:
       self.required_nodes = 2
     else:
       self.required_nodes = 1
@@ -11082,11 +11497,12 @@ class IAllocator(object):
       raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                    " IAllocator" % self.name)
 
-    if instance.disk_template not in constants.DTS_NET_MIRROR:
+    if instance.disk_template not in constants.DTS_MIRRORED:
       raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                  errors.ECODE_INVAL)
 
-    if len(instance.secondary_nodes) != 1:
+    if instance.disk_template in constants.DTS_INT_MIRROR and \
+        len(instance.secondary_nodes) != 1:
       raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                  errors.ECODE_STATE)
 
@@ -11271,13 +11687,16 @@ _QUERY_IMPL = {
   constants.QR_INSTANCE: _InstanceQuery,
   constants.QR_NODE: _NodeQuery,
   constants.QR_GROUP: _GroupQuery,
+  constants.QR_OS: _OsQuery,
   }
 
+assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
+
 
 def _GetQueryImplementation(name):
   """Returns the implemtnation for a query type.
 
-  @param name: Query type, must be one of L{constants.QR_OP_QUERY}
+  @param name: Query type, must be one of L{constants.QR_VIA_OP}

   """
   try: