Merge branch 'stable-2.8' into master
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 2d4864e..6498ce2 100644
@@ -1021,12 +1021,12 @@ class LUInstanceCreate(LogicalUnit):
                         " from the first disk's node group will be"
                         " used")
 
-    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
-      nodes = [pnode]
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        nodes.append(snode)
-      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
-      if compat.any(map(has_es, nodes)):
+    nodes = [pnode]
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
+      nodes.append(snode)
+    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
+    if compat.any(map(has_es, nodes)):
+      if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
         raise errors.OpPrereqError("Disk template %s not supported with"
                                    " exclusive storage" % self.op.disk_template,
                                    errors.ECODE_STATE)
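
The hunk above reorders the exclusive-storage guard: the nodes that will hold the instance's disks are now always collected, and the "not supported with exclusive storage" error is raised only when exclusive storage is actually enabled on one of them. A minimal standalone sketch of the resulting control flow, with illustrative names (mirrored_templates, excl_storage_templates and is_excl_storage_enabled are placeholders, not Ganeti identifiers):

def check_excl_storage(disk_template, pnode, snode, mirrored_templates,
                       excl_storage_templates, is_excl_storage_enabled):
  # Collect the nodes that will hold the instance's disks: always the
  # primary, plus the secondary for internally mirrored templates.
  nodes = [pnode]
  if disk_template in mirrored_templates:
    nodes.append(snode)
  # The disk template only matters once exclusive storage is known to be
  # enabled on at least one of those nodes.
  if any(is_excl_storage_enabled(n) for n in nodes):
    if disk_template not in excl_storage_templates:
      raise ValueError("Disk template %s not supported with exclusive"
                       " storage" % disk_template)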
@@ -2601,47 +2601,16 @@ class LUInstanceSetParams(LogicalUnit):
                                            self.op.disk_template))
         raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
 
-  def CheckPrereq(self):
-    """Check prerequisites.
+  def _PreCheckDisks(self, ispec):
+    """CheckPrereq checks related to disk changes.
 
-    This only checks the instance list against the existing names.
+    @type ispec: dict
+    @param ispec: instance specs to be updated with the new disks
 
     """
-    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-
-    cluster = self.cluster = self.cfg.GetClusterInfo()
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    pnode = instance.primary_node
-
-    self.warn = []
-
-    if (self.op.pnode is not None and self.op.pnode != pnode and
-        not self.op.force):
-      # verify that the instance is not up
-      instance_info = self.rpc.call_instance_info(pnode, instance.name,
-                                                  instance.hypervisor)
-      if instance_info.fail_msg:
-        self.warn.append("Can't get instance runtime information: %s" %
-                         instance_info.fail_msg)
-      elif instance_info.payload:
-        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
-                                   errors.ECODE_STATE)
-
-    assert pnode in self.owned_locks(locking.LEVEL_NODE)
-    nodelist = list(instance.all_nodes)
-    pnode_info = self.cfg.GetNodeInfo(pnode)
+    instance = self.instance
     self.diskparams = self.cfg.GetInstanceDiskParams(instance)
 
-    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
-    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
-    group_info = self.cfg.GetNodeGroup(pnode_info.group)
-
-    # dictionary with instance information after the modification
-    ispec = {}
-
     # Check disk modifications. This is done here and not in CheckArguments
     # (as with NICs), because we need to know the instance's disk template
     if instance.disk_template == constants.DT_EXT:
@@ -2651,9 +2620,7 @@ class LUInstanceSetParams(LogicalUnit):
       self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                       self._VerifyDiskModification)
 
-    # Prepare disk/NIC modifications
     self.diskmod = _PrepareContainerMods(self.op.disks, None)
-    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
 
     # Check the validity of the `provider' parameter
     if instance.disk_template in constants.DT_EXT:
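
The two hunks above move the preparation of disk modifications into the new _PreCheckDisks helper, while the NIC preparation stays behind in CheckPrereq (it is re-added further down in this diff). Both preparation calls work on modification lists of (operation, index, parameters) entries and pair each entry with optional per-item private data; a hedged sketch of that pairing step (the real _PrepareContainerMods may differ in detail):

def prepare_container_mods(mods, private_fn):
  # Attach a private scratch object to each requested change; disks pass
  # private_fn=None and get no private data, NICs use a dedicated class.
  return [(op, idx, params, private_fn() if private_fn else None)
          for (op, idx, params) in mods]

# Illustrative input: add one 1 GiB disk and rename disk 0.
disk_mods = prepare_container_mods(
  [("add", -1, {"size": 1024}), ("modify", 0, {"name": "data"})], None)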
@@ -2682,6 +2649,75 @@ class LUInstanceSetParams(LogicalUnit):
                                       constants.DT_EXT),
                                      errors.ECODE_INVAL)
 
+    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
+      raise errors.OpPrereqError("Disk operations not supported for"
+                                 " diskless instances", errors.ECODE_INVAL)
+
+    def _PrepareDiskMod(_, disk, params, __):
+      disk.name = params.get(constants.IDISK_NAME, None)
+
+    # Verify disk changes (operating on a copy)
+    disks = copy.deepcopy(instance.disks)
+    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
+                        _PrepareDiskMod, None)
+    utils.ValidateDeviceNames("disk", disks)
+    if len(disks) > constants.MAX_DISKS:
+      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
+                                 " more" % constants.MAX_DISKS,
+                                 errors.ECODE_STATE)
+    disk_sizes = [disk.size for disk in instance.disks]
+    disk_sizes.extend(params["size"] for (op, idx, params, private) in
+                      self.diskmod if op == constants.DDM_ADD)
+    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
+    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
+
+    if self.op.offline is not None and self.op.offline:
+      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+                         msg="can't change to offline")
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This only checks the instance list against the existing names.
+
+    """
+    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+
+    cluster = self.cluster = self.cfg.GetClusterInfo()
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+
+    pnode = instance.primary_node
+
+    self.warn = []
+
+    if (self.op.pnode is not None and self.op.pnode != pnode and
+        not self.op.force):
+      # verify that the instance is not up
+      instance_info = self.rpc.call_instance_info(pnode, instance.name,
+                                                  instance.hypervisor)
+      if instance_info.fail_msg:
+        self.warn.append("Can't get instance runtime information: %s" %
+                         instance_info.fail_msg)
+      elif instance_info.payload:
+        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
+                                   errors.ECODE_STATE)
+
+    assert pnode in self.owned_locks(locking.LEVEL_NODE)
+    nodelist = list(instance.all_nodes)
+    pnode_info = self.cfg.GetNodeInfo(pnode)
+
+    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
+    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
+    group_info = self.cfg.GetNodeGroup(pnode_info.group)
+
+    # dictionary with instance information after the modification
+    ispec = {}
+
+    # Prepare NIC modifications
+    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+
     # OS change
     if self.op.os_name and not self.op.force:
       CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
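
Within the moved block, _PreCheckDisks also fills in the disk-related entries of ispec: the sizes of the instance's current disks plus the sizes of any disks being added, together with the resulting disk count. A condensed sketch of that aggregation and the MAX_DISKS limit from the hunk above, using generic dictionary keys instead of the ISPEC_* constants:

def disk_ispec(existing_sizes, disk_mods, max_disks, ddm_add="add"):
  # disk_mods entries are (op, index, params, private) tuples; only
  # additions contribute a new size.
  sizes = list(existing_sizes)
  sizes.extend(params["size"] for (op, _, params, _) in disk_mods
               if op == ddm_add)
  if len(sizes) > max_disks:
    raise ValueError("too many disks (%d), limit is %d" %
                     (len(sizes), max_disks))
  return {"disk-count": len(sizes), "disk-sizes": sizes}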
@@ -2696,6 +2732,8 @@ class LUInstanceSetParams(LogicalUnit):
     if self.op.disk_template:
       self._PreCheckDiskTemplate(pnode_info)
 
+    self._PreCheckDisks(ispec)
+
     # hvparams processing
     if self.op.hvparams:
       hv_type = instance.hypervisor
@@ -2850,10 +2888,6 @@ class LUInstanceSetParams(LogicalUnit):
                             "ballooning memory for instance %s" %
                             instance.name, delta, instance.hypervisor)
 
-    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
-      raise errors.OpPrereqError("Disk operations not supported for"
-                                 " diskless instances", errors.ECODE_INVAL)
-
     def _PrepareNicCreate(_, params, private):
       self._PrepareNicModification(params, private, None, None,
                                    {}, cluster, pnode)
@@ -2879,28 +2913,6 @@ class LUInstanceSetParams(LogicalUnit):
                                  " (%d), cannot add more" % constants.MAX_NICS,
                                  errors.ECODE_STATE)
 
-    def _PrepareDiskMod(_, disk, params, __):
-      disk.name = params.get(constants.IDISK_NAME, None)
-
-    # Verify disk changes (operating on a copy)
-    disks = copy.deepcopy(instance.disks)
-    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
-                        _PrepareDiskMod, None)
-    utils.ValidateDeviceNames("disk", disks)
-    if len(disks) > constants.MAX_DISKS:
-      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
-                                 " more" % constants.MAX_DISKS,
-                                 errors.ECODE_STATE)
-    disk_sizes = [disk.size for disk in instance.disks]
-    disk_sizes.extend(params["size"] for (op, idx, params, private) in
-                      self.diskmod if op == constants.DDM_ADD)
-    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
-    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
-
-    if self.op.offline is not None and self.op.offline:
-      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
-                         msg="can't change to offline")
-
     # Pre-compute NIC changes (necessary to use result in hooks)
     self._nic_chgdesc = []
     if self.nicmod:
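
The block removed above is the code that now lives in _PreCheckDisks: disk changes are first applied to a deep copy of the instance's disks, so device-name validation and the MAX_DISKS limit can be checked without touching the real configuration. The general dry-run pattern, sketched with a simplified (op, index, params) modification format rather than the exact Ganeti helpers:

import copy

def dry_run_mods(container, mods, max_items):
  # Validate a list of (op, index, params) changes against a copy of the
  # container; the caller's original list is left untouched.
  work = copy.deepcopy(container)
  for op, idx, params in mods:
    if op == "add":
      work.append(dict(params))
    elif op == "remove":
      del work[idx]
    else:  # "modify"
      work[idx].update(params)
  if len(work) > max_items:
    raise ValueError("too many items (%d), limit is %d" %
                     (len(work), max_items))
  return work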