Honor disks_active of instance when adding disks
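
When a disk is added to an instance, wait for the new disk to sync and,
if the instance's disks are supposed to be inactive (disks_active is
False), shut the new disk down again afterwards. Adding a disk with
--no-wait-for-sync to an instance with activated disks is rejected in
CheckPrereq, since the new disk would be left unsynced.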
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 56806bf..10fccc8 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -36,7 +36,6 @@ from ganeti.masterd import iallocator
 from ganeti import masterd
 from ganeti import netutils
 from ganeti import objects
-from ganeti import opcodes
 from ganeti import pathutils
 from ganeti import rpc
 from ganeti import utils
@@ -49,7 +48,8 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, \
   LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
   IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
   AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
-  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
+  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
+  CheckDiskTemplateEnabled
 from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
   IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
@@ -173,6 +173,7 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
     net = nic.get(constants.INIC_NETWORK, None)
     link = nic.get(constants.NIC_LINK, None)
     ip = nic.get(constants.INIC_IP, None)
+    vlan = nic.get(constants.INIC_VLAN, None)
 
     if net is None or net.lower() == constants.VALUE_NONE:
       net = None
@@ -182,6 +183,10 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
                                    " is allowed to be passed",
                                    errors.ECODE_INVAL)
 
+    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
+      raise errors.OpPrereqError("VLAN is given, but network mode is not"
+                                 " openvswitch", errors.ECODE_INVAL)
+
     # ip validity checks
     if ip is None or ip.lower() == constants.VALUE_NONE:
       nic_ip = None
@@ -230,6 +235,8 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
       nicparams[constants.NIC_MODE] = nic_mode
     if link:
       nicparams[constants.NIC_LINK] = link
+    if vlan:
+      nicparams[constants.NIC_VLAN] = vlan
 
     check_params = cluster.SimpleFillNIC(nicparams)
     objects.NIC.CheckParameterSyntax(check_params)
@@ -334,12 +341,7 @@ class LUInstanceCreate(LogicalUnit):
       # chicken-and-egg problem, it should be possible to specify just a node
       # group from the iallocator and take the ipolicy from that.
       self.op.disk_template = cluster.enabled_disk_templates[0]
-    if not self.op.disk_template in cluster.enabled_disk_templates:
-      raise errors.OpPrereqError("Cannot create an instance with disk template"
-                                 " '%s', because it is not enabled in the"
-                                 " cluster. Enabled disk templates are: %s." %
-                                 (self.op.disk_template,
-                                  ",".join(cluster.enabled_disk_templates)))
+    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
 
   def _CheckDiskArguments(self):
     """Checks validity of disk-related arguments.
@@ -383,6 +385,38 @@ class LUInstanceCreate(LogicalUnit):
 
     self.adopt_disks = has_adopt
 
+  def _CheckVLANArguments(self):
+    """ Check validity of VLANs if given
+
+    """
+    for nic in self.op.nics:
+      vlan = nic.get(constants.INIC_VLAN, None)
+      if vlan:
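+        # Accepted formats, e.g.:
+        #   "1"        - access port; normalized to ".1" below
+        #   ".100"     - untagged VLAN 100
+        #   ":100:200" - trunk with tagged VLANs 100 and 200
+        #   ".100:200" - untagged VLAN 100 plus tagged VLAN 200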
+        if vlan[0] == ".":
+          # vlan starting with dot means single untagged vlan,
+          # might be followed by trunk (:)
+          if not vlan[1:].isdigit():
+            vlanlist = vlan[1:].split(':')
+            for vl in vlanlist:
+              if not vl.isdigit():
+                raise errors.OpPrereqError("Specified VLAN parameter is "
+                                           "invalid : %s" % vlan,
+                                             errors.ECODE_INVAL)
+        elif vlan[0] == ":":
+          # Trunk - tagged only
+          vlanlist = vlan[1:].split(':')
+          for vl in vlanlist:
+            if not vl.isdigit():
+              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
+                                           " : %s" % vlan, errors.ECODE_INVAL)
+        elif vlan.isdigit():
+          # Simplest case: no dots or colons, just a VLAN number
+          # -> create an untagged access port; the leading dot is added here
+          nic[constants.INIC_VLAN] = "." + vlan
+        else:
+          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
+                                       " : %s" % vlan, errors.ECODE_INVAL)
+
   def CheckArguments(self):
     """Check arguments.
 
@@ -407,7 +441,10 @@ class LUInstanceCreate(LogicalUnit):
     # check that NIC's parameters names are unique and valid
     utils.ValidateDeviceNames("NIC", self.op.nics)
 
+    self._CheckVLANArguments()
+
     self._CheckDiskArguments()
+    assert self.op.disk_template is not None
 
     # instance name verification
     if self.op.name_check:
@@ -424,10 +461,11 @@ class LUInstanceCreate(LogicalUnit):
       raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                  self.op.file_driver, errors.ECODE_INVAL)
 
-    if self.op.disk_template == constants.DT_FILE:
-      opcodes.RequireFileStorage()
-    elif self.op.disk_template == constants.DT_SHARED_FILE:
-      opcodes.RequireSharedFileStorage()
+    # set default file_driver if unset and required
+    if (not self.op.file_driver and
+        self.op.disk_template in [constants.DT_FILE,
+                                  constants.DT_SHARED_FILE]):
+      self.op.file_driver = constants.FD_LOOP
 
     ### Node/iallocator related checks
     CheckIAllocatorOrNode(self, "iallocator", "pnode")
@@ -444,8 +482,6 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckOpportunisticLocking(self.op)
 
-    self._cds = GetClusterDomainSecret()
-
     if self.op.mode == constants.INSTANCE_IMPORT:
       # On import force_variant must be True, because if we forced it at
       # initial install, our only chance when importing it back is that it
@@ -463,11 +499,9 @@ class LUInstanceCreate(LogicalUnit):
         raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                    " installation" % self.op.os_type,
                                    errors.ECODE_STATE)
-      if self.op.disk_template is None:
-        raise errors.OpPrereqError("No disk template specified",
-                                   errors.ECODE_INVAL)
-
     elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
+      self._cds = GetClusterDomainSecret()
+
       # Check handshake to ensure both clusters have the same domain secret
       src_handshake = self.op.source_handshake
       if not src_handshake:
@@ -571,7 +605,7 @@ class LUInstanceCreate(LogicalUnit):
         if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
           self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
         if not os.path.isabs(src_path):
-          self.op.src_path = src_path = \
+          self.op.src_path = \
             utils.PathJoin(pathutils.EXPORT_DIR, src_path)
 
     self.needed_locks[locking.LEVEL_NODE_RES] = \
@@ -588,8 +622,6 @@ class LUInstanceCreate(LogicalUnit):
     else:
       node_name_whitelist = None
 
-    #TODO Export network to iallocator so that it chooses a pnode
-    #     in a nodegroup that has the desired network connected to
     req = _CreateInstanceAllocRequest(self.op, self.disks,
                                       self.nics, self.be_full,
                                       node_name_whitelist)
@@ -679,13 +711,13 @@ class LUInstanceCreate(LogicalUnit):
       locked_nodes = self.owned_locks(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
-      for node in exp_list:
-        if exp_list[node].fail_msg:
+      for node_uuid in exp_list:
+        if exp_list[node_uuid].fail_msg:
           continue
-        if self.op.src_path in exp_list[node].payload:
+        if self.op.src_path in exp_list[node_uuid].payload:
           found = True
-          self.op.src_node = node
-          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
+          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
+          self.op.src_node_uuid = node_uuid
           self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                             self.op.src_path)
           break
@@ -739,8 +771,10 @@ class LUInstanceCreate(LogicalUnit):
         if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
           ndict = {}
           for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
-            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
-            ndict[name] = v
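+            # skip any NIC parameter that is missing from the export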
+            nic_param_name = "nic%d_%s" % (idx, name)
+            if einfo.has_option(constants.INISECT_INS, nic_param_name):
+              v = einfo.get(constants.INISECT_INS, nic_param_name)
+              ndict[name] = v
           nics.append(ndict)
         else:
           break
@@ -987,7 +1021,7 @@ class LUInstanceCreate(LogicalUnit):
         netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
         if netparams is None:
           raise errors.OpPrereqError("No netparams found for network"
-                                     " %s. Propably not connected to"
+                                     " %s. Probably not connected to"
                                      " node's %s nodegroup" %
                                      (nobj.name, self.pnode.name),
                                      errors.ECODE_INVAL)
@@ -1238,7 +1272,6 @@ class LUInstanceCreate(LogicalUnit):
         for t_dsk, a_dsk in zip(tmp_disks, self.disks):
           rename_to.append(t_dsk.logical_id)
           t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
-          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
         result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                                zip(tmp_disks, rename_to))
         result.Raise("Failed to rename adoped LVs")
@@ -1303,11 +1336,6 @@ class LUInstanceCreate(LogicalUnit):
     ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
-      # we need to set the disks ID to the primary node, since the
-      # preceding code might or might have not done it, depending on
-      # disk template and other options
-      for disk in iobj.disks:
-        self.cfg.SetDiskID(disk, self.pnode.uuid)
       if self.op.mode == constants.INSTANCE_CREATE:
         if not self.op.no_install:
           pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
@@ -1355,7 +1383,7 @@ class LUInstanceCreate(LogicalUnit):
             dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                                constants.IEIO_FILE, (image, ),
                                                constants.IEIO_SCRIPT,
-                                               (iobj.disks[idx], idx),
+                                               ((iobj.disks[idx], iobj), idx),
                                                None)
             transfers.append(dt)
 
@@ -1469,6 +1497,15 @@ class LUInstanceRename(LogicalUnit):
                                 self.op.instance_name)
     instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert instance is not None
+
+    # An instance should not normally be running with a disabled disk
+    # template, but if it is, renaming a file-based instance would fail
+    # badly, so we check for it beforehand.
+    if (instance.disk_template in constants.DTS_FILEBASED and
+        self.op.new_name != instance.name):
+      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
+                               instance.disk_template)
+
     CheckNodeOnline(self, instance.primary_node)
     CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                        msg="cannot rename")
@@ -1530,8 +1567,8 @@ class LUInstanceRename(LogicalUnit):
     info = GetInstanceInfoText(renamed_inst)
     for (idx, disk) in enumerate(renamed_inst.disks):
       for node_uuid in renamed_inst.all_nodes:
-        self.cfg.SetDiskID(disk, node_uuid)
-        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
+        result = self.rpc.call_blockdev_setinfo(node_uuid,
+                                                (disk, renamed_inst), info)
         result.Warn("Error setting info on node %s for disk %s" %
                     (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
     try:
@@ -1639,7 +1676,7 @@ class LUInstanceMove(LogicalUnit):
     (self.op.target_node_uuid, self.op.target_node) = \
       ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                             self.op.target_node)
-    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
+    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
     self.needed_locks[locking.LEVEL_NODE_RES] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
 
@@ -1703,7 +1740,8 @@ class LUInstanceMove(LogicalUnit):
     bep = self.cfg.GetClusterInfo().FillBE(self.instance)
 
     for idx, dsk in enumerate(self.instance.disks):
-      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
+      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
+                              constants.DT_SHARED_FILE):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                    " cannot copy" % idx, errors.ECODE_STATE)
 
@@ -1852,9 +1890,9 @@ class LUInstanceMultiAlloc(NoHooksLU):
                                  " pnode/snode while others do not",
                                  errors.ECODE_INVAL)
 
-    if self.op.iallocator is None:
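+    # an iallocator is only needed if no nodes were specified explicitly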
+    if not has_nodes and self.op.iallocator is None:
       default_iallocator = self.cfg.GetDefaultIAllocator()
-      if default_iallocator and has_nodes:
+      if default_iallocator:
         self.op.iallocator = default_iallocator
       else:
         raise errors.OpPrereqError("No iallocator or nodes on the instances"
@@ -1894,11 +1932,11 @@ class LUInstanceMultiAlloc(NoHooksLU):
       for inst in self.op.instances:
         (inst.pnode_uuid, inst.pnode) = \
           ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
-        nodeslist.append(inst.pnode)
+        nodeslist.append(inst.pnode_uuid)
         if inst.snode is not None:
           (inst.snode_uuid, inst.snode) = \
             ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
-          nodeslist.append(inst.snode)
+          nodeslist.append(inst.snode_uuid)
 
       self.needed_locks[locking.LEVEL_NODE] = nodeslist
       # Lock resources of instance's primary and secondary nodes (copy to
@@ -1909,36 +1947,37 @@ class LUInstanceMultiAlloc(NoHooksLU):
     """Check prerequisite.
 
     """
-    cluster = self.cfg.GetClusterInfo()
-    default_vg = self.cfg.GetVGName()
-    ec_id = self.proc.GetECId()
+    if self.op.iallocator:
+      cluster = self.cfg.GetClusterInfo()
+      default_vg = self.cfg.GetVGName()
+      ec_id = self.proc.GetECId()
 
-    if self.op.opportunistic_locking:
-      # Only consider nodes for which a lock is held
-      node_whitelist = self.cfg.GetNodeNames(
-                         list(self.owned_locks(locking.LEVEL_NODE)))
-    else:
-      node_whitelist = None
+      if self.op.opportunistic_locking:
+        # Only consider nodes for which a lock is held
+        node_whitelist = self.cfg.GetNodeNames(
+                           list(self.owned_locks(locking.LEVEL_NODE)))
+      else:
+        node_whitelist = None
 
-    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
-                                         _ComputeNics(op, cluster, None,
-                                                      self.cfg, ec_id),
-                                         _ComputeFullBeParams(op, cluster),
-                                         node_whitelist)
-             for op in self.op.instances]
+      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+                                           _ComputeNics(op, cluster, None,
+                                                        self.cfg, ec_id),
+                                           _ComputeFullBeParams(op, cluster),
+                                           node_whitelist)
+               for op in self.op.instances]
 
-    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
-    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
+      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
+      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
-    ial.Run(self.op.iallocator)
+      ial.Run(self.op.iallocator)
 
-    if not ial.success:
-      raise errors.OpPrereqError("Can't compute nodes using"
-                                 " iallocator '%s': %s" %
-                                 (self.op.iallocator, ial.info),
-                                 errors.ECODE_NORES)
+      if not ial.success:
+        raise errors.OpPrereqError("Can't compute nodes using"
+                                   " iallocator '%s': %s" %
+                                   (self.op.iallocator, ial.info),
+                                   errors.ECODE_NORES)
 
-    self.ia_result = ial.result
+      self.ia_result = ial.result
 
     if self.op.dry_run:
       self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
@@ -1949,35 +1988,44 @@ class LUInstanceMultiAlloc(NoHooksLU):
     """Contructs the partial result.
 
     """
-    (allocatable, failed) = self.ia_result
+    if self.op.iallocator:
+      (allocatable, failed_insts) = self.ia_result
+      allocatable_insts = map(compat.fst, allocatable)
+    else:
+      allocatable_insts = [op.instance_name for op in self.op.instances]
+      failed_insts = []
+
     return {
-      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
-        map(compat.fst, allocatable),
-      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
+      constants.ALLOCATABLE_KEY: allocatable_insts,
+      constants.FAILED_KEY: failed_insts,
       }
 
   def Exec(self, feedback_fn):
     """Executes the opcode.
 
     """
-    op2inst = dict((op.instance_name, op) for op in self.op.instances)
-    (allocatable, failed) = self.ia_result
-
     jobs = []
-    for (name, node_names) in allocatable:
-      op = op2inst.pop(name)
+    if self.op.iallocator:
+      op2inst = dict((op.instance_name, op) for op in self.op.instances)
+      (allocatable, failed) = self.ia_result
 
-      (op.pnode_uuid, op.pnode) = \
-        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
-      if len(node_names) > 1:
-        (op.snode_uuid, op.snode) = \
-          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
+      for (name, node_names) in allocatable:
+        op = op2inst.pop(name)
 
-      jobs.append([op])
+        (op.pnode_uuid, op.pnode) = \
+          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
+        if len(node_names) > 1:
+          (op.snode_uuid, op.snode) = \
+            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
 
-    missing = set(op2inst.keys()) - set(failed)
-    assert not missing, \
-      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
+        jobs.append([op])
+
+      missing = set(op2inst.keys()) - set(failed)
+      assert not missing, \
+        "Iallocator returned an incomplete result: %s" % \
+        utils.CommaJoin(missing)
+    else:
+      jobs.extend([op] for op in self.op.instances)
 
     return ResultWithJobs(jobs, **self._ConstructPartialResult())
 
@@ -2088,7 +2136,8 @@ def GetItemFromContainer(identifier, kind, container):
 
 
 def _ApplyContainerMods(kind, container, chgdesc, mods,
-                        create_fn, modify_fn, remove_fn):
+                        create_fn, modify_fn, remove_fn,
+                        post_add_fn=None):
   """Applies descriptions in C{mods} to C{container}.
 
   @type kind: string
@@ -2112,6 +2161,10 @@ def _ApplyContainerMods(kind, container, chgdesc, mods,
   @type remove_fn: callable
   @param remove_fn: Callback on removing item; receives absolute item index,
     item and private data object as added by L{_PrepareContainerMods}
+  @type post_add_fn: callable
+  @param post_add_fn: Callable for post-processing a newly created item after
+    it has been put into the container. It receives the index of the new item
+    and the new item as parameters.
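+
+  Illustrative example (C{disks} and C{mods} are placeholders)::
+
+    _ApplyContainerMods("disk", disks, None, mods, create_fn, modify_fn,
+                        remove_fn, post_add_fn=lambda idx, item: None)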
 
   """
   for (op, identifier, params, private) in mods:
@@ -2148,6 +2201,10 @@ def _ApplyContainerMods(kind, container, chgdesc, mods,
         assert idx <= len(container)
         # list.insert does so before the specified index
         container.insert(idx, item)
+
+      if post_add_fn is not None:
+        post_add_fn(addidx, item)
+
     else:
       # Retrieve existing item
       (absidx, item) = GetItemFromContainer(identifier, kind, container)
@@ -2257,12 +2314,7 @@ class LUInstanceSetParams(LogicalUnit):
       if size is None:
         raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                    constants.IDISK_SIZE, errors.ECODE_INVAL)
-
-      try:
-        size = int(size)
-      except (TypeError, ValueError), err:
-        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
-                                   errors.ECODE_INVAL)
+      size = int(size)
 
       params[constants.IDISK_SIZE] = size
       name = params.get(constants.IDISK_NAME, None)
@@ -2337,8 +2389,8 @@ class LUInstanceSetParams(LogicalUnit):
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.offline is not None or self.op.runtime_mem or
-            self.op.pnode):
+            self.op.osparams or self.op.offline is not None or
+            self.op.runtime_mem or self.op.pnode):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
@@ -2346,9 +2398,9 @@ class LUInstanceSetParams(LogicalUnit):
                            "hypervisor", "instance", "cluster")
 
     self.op.disks = self._UpgradeDiskNicMods(
-      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
+      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
     self.op.nics = self._UpgradeDiskNicMods(
-      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
+      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
 
     if self.op.disks and self.op.disk_template is not None:
       raise errors.OpPrereqError("Disk template conversion and other disk"
@@ -2592,6 +2644,10 @@ class LUInstanceSetParams(LogicalUnit):
                                  self.instance.disk_template,
                                  errors.ECODE_INVAL)
 
+    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
+      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
+                                 " cluster." % self.op.disk_template)
+
     if (self.instance.disk_template,
         self.op.disk_template) not in self._DISK_CONVERSIONS:
       raise errors.OpPrereqError("Unsupported disk template conversion from"
@@ -2692,6 +2748,14 @@ class LUInstanceSetParams(LogicalUnit):
                                       constants.DT_EXT),
                                      errors.ECODE_INVAL)
 
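+    # A newly added disk must be fully synced before a running instance can
+    # use it, so refuse --no-wait-for-sync while the instance's disks are
+    # active.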
+    if not self.op.wait_for_sync and self.instance.disks_active:
+      for mod in self.diskmod:
+        if mod[0] == constants.DDM_ADD:
+          raise errors.OpPrereqError("Can't add a disk to an instance with"
+                                     " activated disks and"
+                                     " --no-wait-for-sync given.",
+                                     errors.ECODE_INVAL)
+
     if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Disk operations not supported for"
                                  " diskless instances", errors.ECODE_INVAL)
@@ -3045,8 +3109,7 @@ class LUInstanceSetParams(LogicalUnit):
                                      self.instance.uuid, pnode_uuid,
                                      [snode_uuid], disk_info, None, None, 0,
                                      feedback_fn, self.diskparams)
-    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
-                                        self.diskparams)
+    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
     p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
     s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
     info = GetInstanceInfoText(self.instance)
@@ -3079,8 +3142,6 @@ class LUInstanceSetParams(LogicalUnit):
     except errors.GenericError, e:
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
-      for disk in new_disks:
-        self.cfg.SetDiskID(disk, pnode_uuid)
       rename_back_list = [(n.children[0], o.logical_id)
                           for (n, o) in zip(new_disks, self.instance.disks)]
       result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
@@ -3141,22 +3202,20 @@ class LUInstanceSetParams(LogicalUnit):
 
     feedback_fn("Removing volumes on the secondary node...")
     for disk in old_disks:
-      self.cfg.SetDiskID(disk, snode_uuid)
-      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
-      if msg:
-        self.LogWarning("Could not remove block device %s on node %s,"
-                        " continuing anyway: %s", disk.iv_name,
-                        self.cfg.GetNodeName(snode_uuid), msg)
+      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
+      result.Warn("Could not remove block device %s on node %s,"
+                  " continuing anyway" %
+                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
+                  self.LogWarning)
 
     feedback_fn("Removing unneeded volumes on the primary node...")
     for idx, disk in enumerate(old_disks):
       meta = disk.children[1]
-      self.cfg.SetDiskID(meta, pnode_uuid)
-      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
-      if msg:
-        self.LogWarning("Could not remove metadata for disk %d on node %s,"
-                        " continuing anyway: %s", idx,
-                        self.cfg.GetNodeName(pnode_uuid), msg)
+      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
+      result.Warn("Could not remove metadata for disk %d on node %s,"
+                  " continuing anyway" %
+                  (idx, self.cfg.GetNodeName(pnode_uuid)),
+                  self.LogWarning)
 
   def _CreateNewDisk(self, idx, params, _):
     """Creates a new disk.
@@ -3187,6 +3246,17 @@ class LUInstanceSetParams(LogicalUnit):
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
       ])
 
+  def _PostAddDisk(self, _, disk):
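+    """Waits for a newly added disk to sync and deactivates it if needed.
+
+    """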
+    if not WaitForSync(self, self.instance, disks=[disk],
+                       oneshot=not self.op.wait_for_sync):
+      raise errors.OpExecError("Failed to sync disks of %s" %
+                               self.instance.name)
+
+    # the disk is active at this point, so deactivate it if the instance disks
+    # are supposed to be inactive
+    if not self.instance.disks_active:
+      ShutdownInstanceDisks(self, self.instance, disks=[disk])
+
   @staticmethod
   def _ModifyDisk(idx, disk, params, _):
     """Modifies a disk.
@@ -3211,8 +3281,8 @@ class LUInstanceSetParams(LogicalUnit):
     (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
     for node_uuid, disk in anno_disk.ComputeNodeTree(
                              self.instance.primary_node):
-      self.cfg.SetDiskID(disk, node_uuid)
-      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
+      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
+              .fail_msg
       if msg:
         self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                         " continuing anyway", idx,
@@ -3303,7 +3373,7 @@ class LUInstanceSetParams(LogicalUnit):
     # Apply disk changes
     _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                         self._CreateNewDisk, self._ModifyDisk,
-                        self._RemoveDisk)
+                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
     _UpdateIvNames(0, self.instance.disks)
 
     if self.op.disk_template: