Honor disks_active of instance when adding disks
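Adding a disk to an instance activates the new disk so that it can be created
and synchronized, which until now left it active even when the instance's
disks_active flag says its disks should be down. With this change,
LUInstanceSetParams waits for the newly added disk to sync and then shuts it
down again if disks_active is false; as a guard, adding a disk together with
--no-wait-for-sync is refused while the instance's disks are active. For
example, running "gnt-instance modify --disk add:size=10G inst1" (instance
name hypothetical) against an instance with inactive disks now leaves the new
disk deactivated once the initial sync has finished.

The diff below additionally carries related changes from the same series:
instances keyed by UUID instead of name, (disk, instance) pairs replacing
cfg.SetDiskID in block-device RPCs, and openvswitch VLAN support for NICs.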
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 13a9821..10fccc8 100644
@@ -36,7 +36,6 @@ from ganeti.masterd import iallocator
 from ganeti import masterd
 from ganeti import netutils
 from ganeti import objects
-from ganeti import opcodes
 from ganeti import pathutils
 from ganeti import rpc
 from ganeti import utils
@@ -48,8 +47,9 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, \
   ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
   LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
   IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
-  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
-  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
+  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
+  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
+  CheckDiskTemplateEnabled
 from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
   IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
@@ -173,6 +173,7 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
     net = nic.get(constants.INIC_NETWORK, None)
     link = nic.get(constants.NIC_LINK, None)
     ip = nic.get(constants.INIC_IP, None)
+    vlan = nic.get(constants.INIC_VLAN, None)
 
     if net is None or net.lower() == constants.VALUE_NONE:
       net = None
@@ -182,6 +183,10 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
                                    " is allowed to be passed",
                                    errors.ECODE_INVAL)
 
+    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
+      raise errors.OpPrereqError("VLAN is given, but network mode is not"
+                                 " openvswitch", errors.ECODE_INVAL)
+
     # ip validity checks
     if ip is None or ip.lower() == constants.VALUE_NONE:
       nic_ip = None
@@ -230,6 +235,8 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
       nicparams[constants.NIC_MODE] = nic_mode
     if link:
       nicparams[constants.NIC_LINK] = link
+    if vlan:
+      nicparams[constants.NIC_VLAN] = vlan
 
     check_params = cluster.SimpleFillNIC(nicparams)
     objects.NIC.CheckParameterSyntax(check_params)
@@ -322,40 +329,28 @@ class LUInstanceCreate(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   REQ_BGL = False
 
-  def CheckArguments(self):
-    """Check arguments.
+  def _CheckDiskTemplateValid(self):
+    """Checks validity of disk template.
 
     """
-    # do not require name_check to ease forward/backward compatibility
-    # for tools
-    if self.op.no_install and self.op.start:
-      self.LogInfo("No-installation mode selected, disabling startup")
-      self.op.start = False
-    # validate/normalize the instance name
-    self.op.instance_name = \
-      netutils.Hostname.GetNormalizedName(self.op.instance_name)
-
-    if self.op.ip_check and not self.op.name_check:
-      # TODO: make the ip check more flexible and not depend on the name check
-      raise errors.OpPrereqError("Cannot do IP address check without a name"
-                                 " check", errors.ECODE_INVAL)
+    cluster = self.cfg.GetClusterInfo()
+    if self.op.disk_template is None:
+      # FIXME: It would be better to take the default disk template from the
+      # ipolicy, but for the ipolicy we need the primary node, which we get from
+      # the iallocator, which wants the disk template as input. To solve this
+      # chicken-and-egg problem, it should be possible to specify just a node
+      # group from the iallocator and take the ipolicy from that.
+      self.op.disk_template = cluster.enabled_disk_templates[0]
+    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
 
-    # check nics' parameter names
-    for nic in self.op.nics:
-      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
-    # check that NIC's parameters names are unique and valid
-    utils.ValidateDeviceNames("NIC", self.op.nics)
+  def _CheckDiskArguments(self):
+    """Checks validity of disk-related arguments.
 
+    """
     # check that disk's names are unique and valid
     utils.ValidateDeviceNames("disk", self.op.disks)
 
-    cluster = self.cfg.GetClusterInfo()
-    if not self.op.disk_template in cluster.enabled_disk_templates:
-      raise errors.OpPrereqError("Cannot create an instance with disk template"
-                                 " '%s', because it is not enabled in the"
-                                 " cluster. Enabled disk templates are: %s." %
-                                 (self.op.disk_template,
-                                  ",".join(cluster.enabled_disk_templates)))
+    self._CheckDiskTemplateValid()
 
     # check disks. parameter names and consistent adopt/no-adopt strategy
     has_adopt = has_no_adopt = False
@@ -390,12 +385,73 @@ class LUInstanceCreate(LogicalUnit):
 
     self.adopt_disks = has_adopt
 
+  def _CheckVLANArguments(self):
+    """ Check validity of VLANs if given
+
+    """
+    for nic in self.op.nics:
+      vlan = nic.get(constants.INIC_VLAN, None)
+      if vlan:
+        if vlan[0] == ".":
+          # a VLAN starting with a dot is a single untagged VLAN,
+          # possibly followed by trunked (colon-separated) VLANs
+          if not vlan[1:].isdigit():
+            vlanlist = vlan[1:].split(':')
+            for vl in vlanlist:
+              if not vl.isdigit():
+                raise errors.OpPrereqError("Specified VLAN parameter is"
+                                           " invalid: %s" % vlan,
+                                           errors.ECODE_INVAL)
+        elif vlan[0] == ":":
+          # Trunk - tagged only
+          vlanlist = vlan[1:].split(':')
+          for vl in vlanlist:
+            if not vl.isdigit():
+              raise errors.OpPrereqError("Specified VLAN parameter is"
+                                         " invalid: %s" % vlan,
+                                         errors.ECODE_INVAL)
+        elif vlan.isdigit():
+          # The simplest case: no dots or colons, just digits
+          # -> create an untagged access port; the leading dot is added here
+          nic[constants.INIC_VLAN] = "." + vlan
+        else:
+          raise errors.OpPrereqError("Specified VLAN parameter is"
+                                     " invalid: %s" % vlan,
+                                     errors.ECODE_INVAL)
+
+  def CheckArguments(self):
+    """Check arguments.
+
+    """
+    # do not require name_check to ease forward/backward compatibility
+    # for tools
+    if self.op.no_install and self.op.start:
+      self.LogInfo("No-installation mode selected, disabling startup")
+      self.op.start = False
+    # validate/normalize the instance name
+    self.op.instance_name = \
+      netutils.Hostname.GetNormalizedName(self.op.instance_name)
+
+    if self.op.ip_check and not self.op.name_check:
+      # TODO: make the ip check more flexible and not depend on the name check
+      raise errors.OpPrereqError("Cannot do IP address check without a name"
+                                 " check", errors.ECODE_INVAL)
+
+    # check nics' parameter names
+    for nic in self.op.nics:
+      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
+    # check that NIC's parameters names are unique and valid
+    utils.ValidateDeviceNames("NIC", self.op.nics)
+
+    self._CheckVLANArguments()
+
+    self._CheckDiskArguments()
+    assert self.op.disk_template is not None
+
     # instance name verification
     if self.op.name_check:
-      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
-      self.op.instance_name = self.hostname1.name
+      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
+      self.op.instance_name = self.hostname.name
       # used in CheckPrereq for ip ping check
-      self.check_ip = self.hostname1.ip
+      self.check_ip = self.hostname.ip
     else:
       self.check_ip = None
 
@@ -405,10 +461,11 @@ class LUInstanceCreate(LogicalUnit):
       raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                  self.op.file_driver, errors.ECODE_INVAL)
 
-    if self.op.disk_template == constants.DT_FILE:
-      opcodes.RequireFileStorage()
-    elif self.op.disk_template == constants.DT_SHARED_FILE:
-      opcodes.RequireSharedFileStorage()
+    # set default file_driver if unset and required
+    if (not self.op.file_driver and
+        self.op.disk_template in [constants.DT_FILE,
+                                  constants.DT_SHARED_FILE]):
+      self.op.file_driver = constants.FD_LOOP
 
     ### Node/iallocator related checks
     CheckIAllocatorOrNode(self, "iallocator", "pnode")
@@ -425,8 +482,6 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckOpportunisticLocking(self.op)
 
-    self._cds = GetClusterDomainSecret()
-
     if self.op.mode == constants.INSTANCE_IMPORT:
       # On import force_variant must be True, because if we forced it at
       # initial install, our only chance when importing it back is that it
@@ -444,11 +499,9 @@ class LUInstanceCreate(LogicalUnit):
         raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                    " installation" % self.op.os_type,
                                    errors.ECODE_STATE)
-      if self.op.disk_template is None:
-        raise errors.OpPrereqError("No disk template specified",
-                                   errors.ECODE_INVAL)
-
     elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
+      self._cds = GetClusterDomainSecret()
+
       # Check handshake to ensure both clusters have the same domain secret
       src_handshake = self.op.source_handshake
       if not src_handshake:
@@ -503,7 +556,8 @@ class LUInstanceCreate(LogicalUnit):
 
     # this is just a preventive check, but someone might still add this
     # instance in the meantime, and creation will fail at lock-add time
-    if self.op.instance_name in self.cfg.GetInstanceList():
+    if self.op.instance_name in \
+      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                  self.op.instance_name, errors.ECODE_EXISTS)
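
The duplicate-name check above reflects a theme running through the whole
patch: instances are now addressed by UUID in the configuration, and names
are resolved once at the opcode boundary. The recurring pattern (as used in
LUInstanceRename further down) is:

    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
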
 
@@ -551,7 +605,7 @@ class LUInstanceCreate(LogicalUnit):
         if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
           self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
         if not os.path.isabs(src_path):
-          self.op.src_path = src_path = \
+          self.op.src_path = \
             utils.PathJoin(pathutils.EXPORT_DIR, src_path)
 
     self.needed_locks[locking.LEVEL_NODE_RES] = \
@@ -563,15 +617,14 @@ class LUInstanceCreate(LogicalUnit):
     """
     if self.op.opportunistic_locking:
       # Only consider nodes for which a lock is held
-      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
+      node_name_whitelist = self.cfg.GetNodeNames(
+        self.owned_locks(locking.LEVEL_NODE))
     else:
-      node_whitelist = None
+      node_name_whitelist = None
 
-    #TODO Export network to iallocator so that it chooses a pnode
-    #     in a nodegroup that has the desired network connected to
     req = _CreateInstanceAllocRequest(self.op, self.disks,
                                       self.nics, self.be_full,
-                                      self.cfg.GetNodeNames(node_whitelist))
+                                      node_name_whitelist)
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
     ial.Run(self.op.iallocator)
@@ -625,8 +678,9 @@ class LUInstanceCreate(LogicalUnit):
       vcpus=self.be_full[constants.BE_VCPUS],
       nics=NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
-      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
-              d[constants.IDISK_MODE]) for d in self.disks],
+      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
+              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
+             for d in self.disks],
       bep=self.be_full,
       hvp=self.hv_full,
       hypervisor_name=self.op.hypervisor,
@@ -657,13 +711,13 @@ class LUInstanceCreate(LogicalUnit):
       locked_nodes = self.owned_locks(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
-      for node in exp_list:
-        if exp_list[node].fail_msg:
+      for node_uuid in exp_list:
+        if exp_list[node_uuid].fail_msg:
           continue
-        if self.op.src_path in exp_list[node].payload:
+        if self.op.src_path in exp_list[node_uuid].payload:
           found = True
-          self.op.src_node = node
-          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
+          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
+          self.op.src_node_uuid = node_uuid
           self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                             self.op.src_path)
           break
@@ -698,21 +752,6 @@ class LUInstanceCreate(LogicalUnit):
     """
     self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
 
-    if self.op.disk_template is None:
-      if einfo.has_option(constants.INISECT_INS, "disk_template"):
-        self.op.disk_template = einfo.get(constants.INISECT_INS,
-                                          "disk_template")
-        if self.op.disk_template not in constants.DISK_TEMPLATES:
-          raise errors.OpPrereqError("Disk template specified in configuration"
-                                     " file is not one of the allowed values:"
-                                     " %s" %
-                                     " ".join(constants.DISK_TEMPLATES),
-                                     errors.ECODE_INVAL)
-      else:
-        raise errors.OpPrereqError("No disk template specified and the export"
-                                   " is missing the disk_template information",
-                                   errors.ECODE_INVAL)
-
     if not self.op.disks:
       disks = []
       # TODO: import the disk iv_name too
@@ -732,8 +771,10 @@ class LUInstanceCreate(LogicalUnit):
         if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
           ndict = {}
           for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
-            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
-            ndict[name] = v
+            nic_param_name = "nic%d_%s" % (idx, name)
+            if einfo.has_option(constants.INISECT_INS, nic_param_name):
+              v = einfo.get(constants.INISECT_INS, nic_param_name)
+              ndict[name] = v
           nics.append(ndict)
         else:
           break
@@ -980,7 +1021,7 @@ class LUInstanceCreate(LogicalUnit):
         netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
         if netparams is None:
           raise errors.OpPrereqError("No netparams found for network"
-                                     " %s. Propably not connected to"
+                                     " %s. Probably not connected to"
                                      " node's %s nodegroup" %
                                      (nobj.name, self.pnode.name),
                                      errors.ECODE_INVAL)
@@ -1186,13 +1227,15 @@ class LUInstanceCreate(LogicalUnit):
     else:
       network_port = None
 
+    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
+
     # This is ugly but we got a chicken-egg problem here
     # We can only take the group disk parameters, as the instance
     # has no disks yet (we are generating them right here).
     nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
     disks = GenerateDiskTemplate(self,
                                  self.op.disk_template,
-                                 self.op.instance_name, self.pnode.uuid,
+                                 instance_uuid, self.pnode.uuid,
                                  self.secondaries,
                                  self.disks,
                                  self.instance_file_storage_dir,
@@ -1201,7 +1244,9 @@ class LUInstanceCreate(LogicalUnit):
                                  feedback_fn,
                                  self.cfg.GetGroupDiskParams(nodegroup))
 
-    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
+    iobj = objects.Instance(name=self.op.instance_name,
+                            uuid=instance_uuid,
+                            os=self.op.os_type,
                             primary_node=self.pnode.uuid,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
@@ -1227,7 +1272,6 @@ class LUInstanceCreate(LogicalUnit):
         for t_dsk, a_dsk in zip(tmp_disks, self.disks):
           rename_to.append(t_dsk.logical_id)
           t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
-          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
         result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                                zip(tmp_disks, rename_to))
         result.Raise("Failed to rename adoped LVs")
@@ -1279,7 +1323,7 @@ class LUInstanceCreate(LogicalUnit):
 
     if disk_abort:
       RemoveDisks(self, iobj)
-      self.cfg.RemoveInstance(iobj.name)
+      self.cfg.RemoveInstance(iobj.uuid)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
       raise errors.OpExecError("There are some degraded disks for"
@@ -1292,11 +1336,6 @@ class LUInstanceCreate(LogicalUnit):
     ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
-      # we need to set the disks ID to the primary node, since the
-      # preceding code might or might have not done it, depending on
-      # disk template and other options
-      for disk in iobj.disks:
-        self.cfg.SetDiskID(disk, self.pnode.uuid)
       if self.op.mode == constants.INSTANCE_CREATE:
         if not self.op.no_install:
           pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
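
The SetDiskID loop dropped above belongs to another recurring change:
block-device RPCs now receive (disk, instance) pairs and the node resolves
the physical device ID itself, instead of the master mutating the disk
objects beforehand. In before/after form (the call is used this way elsewhere
in this patch):

    # before:
    #   self.cfg.SetDiskID(disk, node_uuid)
    #   self.rpc.call_blockdev_remove(node_uuid, disk)
    # after:
    result = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance))
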
@@ -1344,7 +1383,7 @@ class LUInstanceCreate(LogicalUnit):
             dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                                constants.IEIO_FILE, (image, ),
                                                constants.IEIO_SCRIPT,
-                                               (iobj.disks[idx], idx),
+                                               ((iobj.disks[idx], iobj), idx),
                                                None)
             transfers.append(dt)
 
@@ -1453,10 +1492,20 @@ class LUInstanceRename(LogicalUnit):
     This checks that the instance is in the cluster and is not running.
 
     """
-    self.op.instance_name = ExpandInstanceName(self.cfg,
-                                               self.op.instance_name)
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    (self.op.instance_uuid, self.op.instance_name) = \
+      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
+                                self.op.instance_name)
+    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert instance is not None
+
+    # It should not normally happen that an instance is running with a
+    # disabled disk template, but if it does, renaming file-based instances
+    # would fail horribly. Thus, we check for this beforehand.
+    if (instance.disk_template in constants.DTS_FILEBASED and
+        self.op.new_name != instance.name):
+      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
+                               instance.disk_template)
+
     CheckNodeOnline(self, instance.primary_node)
     CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                        msg="cannot rename")
@@ -1472,8 +1521,9 @@ class LUInstanceRename(LogicalUnit):
                                    (hostname.ip, new_name),
                                    errors.ECODE_NOTUNIQUE)
 
-    instance_list = self.cfg.GetInstanceList()
-    if new_name in instance_list and new_name != instance.name:
+    instance_names = [inst.name for
+                      inst in self.cfg.GetAllInstancesInfo().values()]
+    if new_name in instance_names and new_name != instance.name:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                  new_name, errors.ECODE_EXISTS)
 
@@ -1490,7 +1540,7 @@ class LUInstanceRename(LogicalUnit):
                                self.instance.disks[0].logical_id[1])
       rename_file_storage = True
 
-    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
+    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
     # Change the instance lock. This is definitely safe while we hold the BGL.
     # Otherwise the new lock would have to be added in acquired mode.
     assert self.REQ_BGL
@@ -1499,7 +1549,7 @@ class LUInstanceRename(LogicalUnit):
     self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
 
     # re-read the instance from the configuration after rename
-    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)
+    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
 
     if rename_file_storage:
       new_file_storage_dir = os.path.dirname(
@@ -1517,8 +1567,8 @@ class LUInstanceRename(LogicalUnit):
     info = GetInstanceInfoText(renamed_inst)
     for (idx, disk) in enumerate(renamed_inst.disks):
       for node_uuid in renamed_inst.all_nodes:
-        self.cfg.SetDiskID(disk, node_uuid)
-        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
+        result = self.rpc.call_blockdev_setinfo(node_uuid,
+                                                (disk, renamed_inst), info)
         result.Warn("Error setting info on node %s for disk %s" %
                     (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
     try:
@@ -1582,7 +1632,7 @@ class LUInstanceRemove(LogicalUnit):
     This checks that the instance is in the cluster.
 
     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
@@ -1626,7 +1676,7 @@ class LUInstanceMove(LogicalUnit):
     (self.op.target_node_uuid, self.op.target_node) = \
       ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                             self.op.target_node)
-    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
+    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
     self.needed_locks[locking.LEVEL_NODE_RES] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
 
@@ -1668,7 +1718,7 @@ class LUInstanceMove(LogicalUnit):
     This checks that the instance is in the cluster.
 
     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
@@ -1690,7 +1740,8 @@ class LUInstanceMove(LogicalUnit):
     bep = self.cfg.GetClusterInfo().FillBE(self.instance)
 
     for idx, dsk in enumerate(self.instance.disks):
-      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
+      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
+                              constants.DT_SHARED_FILE):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                    " cannot copy" % idx, errors.ECODE_STATE)
 
@@ -1750,7 +1801,7 @@ class LUInstanceMove(LogicalUnit):
       CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
-      self.cfg.ReleaseDRBDMinors(self.instance.name)
+      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
       raise
 
     cluster_name = self.cfg.GetClusterInfo().cluster_name
@@ -1783,7 +1834,7 @@ class LUInstanceMove(LogicalUnit):
       try:
         RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
       finally:
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise errors.OpExecError("Errors during disk copy: %s" %
                                  (",".join(errs),))
 
@@ -1839,9 +1890,9 @@ class LUInstanceMultiAlloc(NoHooksLU):
                                  " pnode/snode while others do not",
                                  errors.ECODE_INVAL)
 
-    if self.op.iallocator is None:
+    if not has_nodes and self.op.iallocator is None:
       default_iallocator = self.cfg.GetDefaultIAllocator()
-      if default_iallocator and has_nodes:
+      if default_iallocator:
         self.op.iallocator = default_iallocator
       else:
         raise errors.OpPrereqError("No iallocator or nodes on the instances"
@@ -1881,11 +1932,11 @@ class LUInstanceMultiAlloc(NoHooksLU):
       for inst in self.op.instances:
         (inst.pnode_uuid, inst.pnode) = \
           ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
-        nodeslist.append(inst.pnode)
+        nodeslist.append(inst.pnode_uuid)
         if inst.snode is not None:
           (inst.snode_uuid, inst.snode) = \
             ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
-          nodeslist.append(inst.snode)
+          nodeslist.append(inst.snode_uuid)
 
       self.needed_locks[locking.LEVEL_NODE] = nodeslist
       # Lock resources of instance's primary and secondary nodes (copy to
@@ -1896,36 +1947,37 @@ class LUInstanceMultiAlloc(NoHooksLU):
     """Check prerequisite.
 
     """
-    cluster = self.cfg.GetClusterInfo()
-    default_vg = self.cfg.GetVGName()
-    ec_id = self.proc.GetECId()
+    if self.op.iallocator:
+      cluster = self.cfg.GetClusterInfo()
+      default_vg = self.cfg.GetVGName()
+      ec_id = self.proc.GetECId()
 
-    if self.op.opportunistic_locking:
-      # Only consider nodes for which a lock is held
-      node_whitelist = self.cfg.GetNodeNames(
-                         list(self.owned_locks(locking.LEVEL_NODE)))
-    else:
-      node_whitelist = None
+      if self.op.opportunistic_locking:
+        # Only consider nodes for which a lock is held
+        node_whitelist = self.cfg.GetNodeNames(
+                           list(self.owned_locks(locking.LEVEL_NODE)))
+      else:
+        node_whitelist = None
 
-    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
-                                         _ComputeNics(op, cluster, None,
-                                                      self.cfg, ec_id),
-                                         _ComputeFullBeParams(op, cluster),
-                                         node_whitelist)
-             for op in self.op.instances]
+      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+                                           _ComputeNics(op, cluster, None,
+                                                        self.cfg, ec_id),
+                                           _ComputeFullBeParams(op, cluster),
+                                           node_whitelist)
+               for op in self.op.instances]
 
-    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
-    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
+      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
+      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
-    ial.Run(self.op.iallocator)
+      ial.Run(self.op.iallocator)
 
-    if not ial.success:
-      raise errors.OpPrereqError("Can't compute nodes using"
-                                 " iallocator '%s': %s" %
-                                 (self.op.iallocator, ial.info),
-                                 errors.ECODE_NORES)
+      if not ial.success:
+        raise errors.OpPrereqError("Can't compute nodes using"
+                                   " iallocator '%s': %s" %
+                                   (self.op.iallocator, ial.info),
+                                   errors.ECODE_NORES)
 
-    self.ia_result = ial.result
+      self.ia_result = ial.result
 
     if self.op.dry_run:
       self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
@@ -1936,35 +1988,44 @@ class LUInstanceMultiAlloc(NoHooksLU):
     """Contructs the partial result.
 
     """
-    (allocatable, failed) = self.ia_result
+    if self.op.iallocator:
+      (allocatable, failed_insts) = self.ia_result
+      allocatable_insts = map(compat.fst, allocatable)
+    else:
+      allocatable_insts = [op.instance_name for op in self.op.instances]
+      failed_insts = []
+
     return {
-      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
-        map(compat.fst, allocatable),
-      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
+      constants.ALLOCATABLE_KEY: allocatable_insts,
+      constants.FAILED_KEY: failed_insts,
       }
 
   def Exec(self, feedback_fn):
     """Executes the opcode.
 
     """
-    op2inst = dict((op.instance_name, op) for op in self.op.instances)
-    (allocatable, failed) = self.ia_result
-
     jobs = []
-    for (name, node_names) in allocatable:
-      op = op2inst.pop(name)
+    if self.op.iallocator:
+      op2inst = dict((op.instance_name, op) for op in self.op.instances)
+      (allocatable, failed) = self.ia_result
+
+      for (name, node_names) in allocatable:
+        op = op2inst.pop(name)
 
-      (op.pnode_uuid, op.pnode) = \
-        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
-      if len(node_names) > 1:
-        (op.snode_uuid, op.snode) = \
-          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
+        (op.pnode_uuid, op.pnode) = \
+          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
+        if len(node_names) > 1:
+          (op.snode_uuid, op.snode) = \
+            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
 
-      jobs.append([op])
+        jobs.append([op])
 
-    missing = set(op2inst.keys()) - set(failed)
-    assert not missing, \
-      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
+      missing = set(op2inst.keys()) - set(failed)
+      assert not missing, \
+        "Iallocator did return incomplete result: %s" % \
+        utils.CommaJoin(missing)
+    else:
+      jobs.extend([op] for op in self.op.instances)
 
     return ResultWithJobs(jobs, **self._ConstructPartialResult())
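
With the reworked CheckArguments, LUInstanceMultiAlloc only needs an
iallocator when the instances do not name their nodes explicitly; otherwise
the allocator run is skipped and Exec submits one job per instance directly.
A sketch of the decision (helper name hypothetical):

    def _NeedsDefaultIAllocator(op):
      # mirrors the new logic: a default iallocator is only looked up
      # when no nodes are given and no iallocator was requested
      has_nodes = compat.any(inst.pnode is not None for inst in op.instances)
      return not has_nodes and op.iallocator is None
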
 
@@ -2020,7 +2081,7 @@ def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
+  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
   for node_uuid in node_uuids:
     info = nodeinfo[node_uuid]
     node_name = lu.cfg.GetNodeName(node_uuid)
@@ -2075,7 +2136,8 @@ def GetItemFromContainer(identifier, kind, container):
 
 
 def _ApplyContainerMods(kind, container, chgdesc, mods,
-                        create_fn, modify_fn, remove_fn):
+                        create_fn, modify_fn, remove_fn,
+                        post_add_fn=None):
   """Applies descriptions in C{mods} to C{container}.
 
   @type kind: string
@@ -2099,6 +2161,10 @@ def _ApplyContainerMods(kind, container, chgdesc, mods,
   @type remove_fn: callable
   @param remove_fn: Callback on removing item; receives absolute item index,
     item and private data object as added by L{_PrepareContainerMods}
+  @type post_add_fn: callable
+  @param post_add_fn: Callable for post-processing a newly created item after
+    it has been put into the container. It receives the index of the new item
+    and the new item as parameters.
 
   """
   for (op, identifier, params, private) in mods:
@@ -2135,6 +2201,10 @@ def _ApplyContainerMods(kind, container, chgdesc, mods,
         assert idx <= len(container)
         # list.insert does so before the specified index
         container.insert(idx, item)
+
+      if post_add_fn is not None:
+        post_add_fn(addidx, item)
+
     else:
       # Retrieve existing item
       (absidx, item) = GetItemFromContainer(identifier, kind, container)
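
_ApplyContainerMods thus gains an optional post_add_fn hook, invoked after a
newly created item has actually been inserted into the container. A minimal
sketch of the callback contract (callback body hypothetical; logging assumed
imported):

    def _PostAdd(idx, item):
      # idx is the absolute index at which the new item now sits
      logging.debug("new item %s landed at index %d", item, idx)

    _ApplyContainerMods("disk", container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=_PostAdd)
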
@@ -2244,12 +2314,7 @@ class LUInstanceSetParams(LogicalUnit):
       if size is None:
         raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                    constants.IDISK_SIZE, errors.ECODE_INVAL)
-
-      try:
-        size = int(size)
-      except (TypeError, ValueError), err:
-        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
-                                   errors.ECODE_INVAL)
+      size = int(size)
 
       params[constants.IDISK_SIZE] = size
       name = params.get(constants.IDISK_NAME, None)
@@ -2324,8 +2389,8 @@ class LUInstanceSetParams(LogicalUnit):
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.offline is not None or self.op.runtime_mem or
-            self.op.pnode):
+            self.op.osparams or self.op.offline is not None or
+            self.op.runtime_mem or self.op.pnode):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
@@ -2333,9 +2398,9 @@ class LUInstanceSetParams(LogicalUnit):
                            "hypervisor", "instance", "cluster")
 
     self.op.disks = self._UpgradeDiskNicMods(
-      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
+      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
     self.op.nics = self._UpgradeDiskNicMods(
-      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
+      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
 
     if self.op.disks and self.op.disk_template is not None:
       raise errors.OpPrereqError("Disk template conversion and other disk"
@@ -2374,7 +2439,7 @@ class LUInstanceSetParams(LogicalUnit):
       # Acquire locks for the instance's nodegroups optimistically. Needs
       # to be verified in CheckPrereq
       self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
     elif level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
@@ -2579,6 +2644,10 @@ class LUInstanceSetParams(LogicalUnit):
                                  self.instance.disk_template,
                                  errors.ECODE_INVAL)
 
+    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
+      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
+                                 " cluster." % self.op.disk_template,
+                                 errors.ECODE_INVAL)
+
     if (self.instance.disk_template,
         self.op.disk_template) not in self._DISK_CONVERSIONS:
       raise errors.OpPrereqError("Unsupported disk template conversion from"
@@ -2679,6 +2748,14 @@ class LUInstanceSetParams(LogicalUnit):
                                       constants.DT_EXT),
                                      errors.ECODE_INVAL)
 
+    if not self.op.wait_for_sync and self.instance.disks_active:
+      for mod in self.diskmod:
+        if mod[0] == constants.DDM_ADD:
+          raise errors.OpPrereqError("Can't add a disk to an instance with"
+                                     " activated disks and"
+                                     " --no-wait-for-sync given.",
+                                     errors.ECODE_INVAL)
+
     if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Disk operations not supported for"
                                  " diskless instances", errors.ECODE_INVAL)
@@ -2712,7 +2789,7 @@ class LUInstanceSetParams(LogicalUnit):
 
     """
     assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     self.cluster = self.cfg.GetClusterInfo()
 
     assert self.instance is not None, \
@@ -2850,7 +2927,7 @@ class LUInstanceSetParams(LogicalUnit):
       hvspecs = [(self.instance.hypervisor,
                   self.cluster.hvparams[self.instance.hypervisor])]
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                         hvspecs, False)
+                                         hvspecs)
       pninfo = nodeinfo[pnode_uuid]
       msg = pninfo.fail_msg
       if msg:
@@ -2908,7 +2985,8 @@ class LUInstanceSetParams(LogicalUnit):
     if self.op.runtime_mem:
       remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
-         self.instance.hypervisor, self.instance.hvparams)
+         self.instance.hypervisor,
+         self.cluster.hvparams[self.instance.hypervisor])
       remote_info.Raise("Error checking node %s" %
                         self.cfg.GetNodeName(self.instance.primary_node))
       if not remote_info.payload: # not running already
@@ -3028,11 +3106,10 @@ class LUInstanceSetParams(LogicalUnit):
                   constants.IDISK_NAME: d.name}
                  for d in self.instance.disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
-                                     self.instance.name, pnode_uuid,
+                                     self.instance.uuid, pnode_uuid,
                                      [snode_uuid], disk_info, None, None, 0,
                                      feedback_fn, self.diskparams)
-    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
-                                        self.diskparams)
+    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
     p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
     s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
     info = GetInstanceInfoText(self.instance)
@@ -3065,8 +3142,6 @@ class LUInstanceSetParams(LogicalUnit):
     except errors.GenericError, e:
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
-      for disk in new_disks:
-        self.cfg.SetDiskID(disk, pnode_uuid)
       rename_back_list = [(n.children[0], o.logical_id)
                           for (n, o) in zip(new_disks, self.instance.disks)]
       result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
@@ -3127,22 +3202,20 @@ class LUInstanceSetParams(LogicalUnit):
 
     feedback_fn("Removing volumes on the secondary node...")
     for disk in old_disks:
-      self.cfg.SetDiskID(disk, snode_uuid)
-      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
-      if msg:
-        self.LogWarning("Could not remove block device %s on node %s,"
-                        " continuing anyway: %s", disk.iv_name,
-                        self.cfg.GetNodeName(snode_uuid), msg)
+      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
+      result.Warn("Could not remove block device %s on node %s,"
+                  " continuing anyway" %
+                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
+                  self.LogWarning)
 
     feedback_fn("Removing unneeded volumes on the primary node...")
     for idx, disk in enumerate(old_disks):
       meta = disk.children[1]
-      self.cfg.SetDiskID(meta, pnode_uuid)
-      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
-      if msg:
-        self.LogWarning("Could not remove metadata for disk %d on node %s,"
-                        " continuing anyway: %s", idx,
-                        self.cfg.GetNodeName(pnode_uuid), msg)
+      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
+      result.Warn("Could not remove metadata for disk %d on node %s,"
+                  " continuing anyway" %
+                  (idx, self.cfg.GetNodeName(pnode_uuid)),
+                  self.LogWarning)
 
   def _CreateNewDisk(self, idx, params, _):
     """Creates a new disk.
@@ -3157,7 +3230,7 @@ class LUInstanceSetParams(LogicalUnit):
 
     disk = \
       GenerateDiskTemplate(self, self.instance.disk_template,
-                           self.instance.name, self.instance.primary_node,
+                           self.instance.uuid, self.instance.primary_node,
                            self.instance.secondary_nodes, [params], file_path,
                            file_driver, idx, self.Log, self.diskparams)[0]
 
@@ -3173,6 +3246,17 @@ class LUInstanceSetParams(LogicalUnit):
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
       ])
 
+  def _PostAddDisk(self, _, disk):
+    if not WaitForSync(self, self.instance, disks=[disk],
+                       oneshot=not self.op.wait_for_sync):
+      raise errors.OpExecError("Failed to sync disks of %s" %
+                               self.instance.name)
+
+    # the disk is active at this point, so deactivate it if the instance disks
+    # are supposed to be inactive
+    if not self.instance.disks_active:
+      ShutdownInstanceDisks(self, self.instance, disks=[disk])
+
   @staticmethod
   def _ModifyDisk(idx, disk, params, _):
     """Modifies a disk.
@@ -3197,8 +3281,8 @@ class LUInstanceSetParams(LogicalUnit):
     (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
     for node_uuid, disk in anno_disk.ComputeNodeTree(
                              self.instance.primary_node):
-      self.cfg.SetDiskID(disk, node_uuid)
-      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
+      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
+              .fail_msg
       if msg:
         self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                         " continuing anyway", idx,
@@ -3289,7 +3373,7 @@ class LUInstanceSetParams(LogicalUnit):
     # Apply disk changes
     _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                         self._CreateNewDisk, self._ModifyDisk,
-                        self._RemoveDisk)
+                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
     _UpdateIvNames(0, self.instance.disks)
 
     if self.op.disk_template:
@@ -3311,7 +3395,7 @@ class LUInstanceSetParams(LogicalUnit):
       try:
         self._DISK_CONVERSIONS[mode](self, feedback_fn)
       except:
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise
       result.append(("disk_template", self.op.disk_template))
 
@@ -3356,11 +3440,11 @@ class LUInstanceSetParams(LogicalUnit):
       pass
     elif self.op.offline:
       # Mark instance as offline
-      self.cfg.MarkInstanceOffline(self.instance.name)
+      self.cfg.MarkInstanceOffline(self.instance.uuid)
       result.append(("admin_state", constants.ADMINST_OFFLINE))
     else:
       # Mark instance as online, but stopped
-      self.cfg.MarkInstanceDown(self.instance.name)
+      self.cfg.MarkInstanceDown(self.instance.uuid)
       result.append(("admin_state", constants.ADMINST_DOWN))
 
     self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
@@ -3410,7 +3494,7 @@ class LUInstanceChangeGroup(LogicalUnit):
 
         # Lock all groups used by instance optimistically; this requires going
         # via the node before it's locked, requiring verification later on
-        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
         lock_groups.update(instance_groups)
       else:
         # No target groups, need to lock all of them
@@ -3426,7 +3510,7 @@ class LUInstanceChangeGroup(LogicalUnit):
 
         # Lock all nodes in all potential target groups
         lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
-                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
+                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
         member_nodes = [node_uuid
                         for group in lock_groups
                         for node_uuid in self.cfg.GetNodeGroup(group).members]
@@ -3436,23 +3520,23 @@ class LUInstanceChangeGroup(LogicalUnit):
         self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert (self.req_target_uuids is None or
             owned_groups.issuperset(self.req_target_uuids))
-    assert owned_instances == set([self.op.instance_name])
+    assert owned_instance_names == set([self.op.instance_name])
 
     # Get instance information
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
 
     # Check if node groups for locked instance are still correct
     assert owned_nodes.issuperset(self.instance.all_nodes), \
       ("Instance %s's nodes changed while we kept the lock" %
        self.op.instance_name)
 
-    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                           owned_groups)
 
     if self.req_target_uuids: