Allow proper cleanup of partially created disks
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 3704ec7..228abd5 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -827,10 +827,10 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
       raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                  errors.ECODE_INVAL)
     if key in constants.IPOLICY_ISPECS:
-      utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
       ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=use_none,
                                        use_default=use_default)
+      utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
     else:
       if (not value or value == [constants.VALUE_DEFAULT] or
           value == constants.VALUE_DEFAULT):
@@ -1231,6 +1231,7 @@ def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
 
 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                  nic_count, disk_sizes, spindle_use,
+                                 disk_template,
                                  _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
@@ -1248,6 +1249,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   @param disk_sizes: Disk sizes of used disks (len must match C{disk_count})
   @type spindle_use: int
   @param spindle_use: The number of spindles this instance uses
+  @type disk_template: string
+  @param disk_template: The disk template of the instance
   @param _compute_fn: The compute function (unittest only)
   @return: A list of violations, or an empty list if no violations are found
 
@@ -1257,18 +1260,25 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   test_settings = [
     (constants.ISPEC_MEM_SIZE, "", mem_size),
     (constants.ISPEC_CPU_COUNT, "", cpu_count),
-    (constants.ISPEC_DISK_COUNT, "", disk_count),
     (constants.ISPEC_NIC_COUNT, "", nic_count),
     (constants.ISPEC_SPINDLE_USE, "", spindle_use),
     ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
          for idx, d in enumerate(disk_sizes)]
+  if disk_template != constants.DT_DISKLESS:
+    # This check doesn't make sense for diskless instances
+    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
+  ret = []
+  allowed_dts = ipolicy[constants.IPOLICY_DTS]
+  if disk_template not in allowed_dts:
+    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
+               (disk_template, utils.CommaJoin(allowed_dts)))
 
-  return filter(None,
-                (_compute_fn(name, qualifier, ipolicy, value)
-                 for (name, qualifier, value) in test_settings))
+  return ret + filter(None,
+                      (_compute_fn(name, qualifier, ipolicy, value)
+                       for (name, qualifier, value) in test_settings))
 
 
-def _ComputeIPolicyInstanceViolation(ipolicy, instance,
+def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                      _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance meets the specs of ipolicy.
 
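
With the new disk_template argument, _ComputeIPolicySpecViolation now also reports templates that fall outside ipolicy[constants.IPOLICY_DTS], and it skips the disk-count check for diskless instances, which legitimately have zero disks. A self-contained sketch of the resulting behaviour (the constants and policy keys here are illustrative):

    DT_DISKLESS = "diskless"

    def compute_violations(ipolicy, disk_template, disk_count):
        violations = []
        allowed = ipolicy["allowed-disk-templates"]
        if disk_template not in allowed:
            violations.append("Disk template %s is not allowed"
                              " (allowed templates: %s)"
                              % (disk_template, ", ".join(allowed)))
        # A diskless instance legitimately has zero disks, so the disk-count
        # bound is only enforced for templates that actually use disks.
        if (disk_template != DT_DISKLESS and
                disk_count < ipolicy["min-disk-count"]):
            violations.append("disk count %d below minimum %d"
                              % (disk_count, ipolicy["min-disk-count"]))
        return violations

    policy = {"allowed-disk-templates": ["plain", "drbd"],
              "min-disk-count": 1}
    assert compute_violations(policy, "drbd", 2) == []
    assert len(compute_violations(policy, "diskless", 0)) == 1
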
@@ -1276,29 +1286,36 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance,
   @param ipolicy: The ipolicy to verify against
   @type instance: L{objects.Instance}
   @param instance: The instance to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
-  mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
-  cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
-  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
+  be_full = cfg.GetClusterInfo().FillBE(instance)
+  mem_size = be_full[constants.BE_MAXMEM]
+  cpu_count = be_full[constants.BE_VCPUS]
+  spindle_use = be_full[constants.BE_SPINDLE_USE]
   disk_count = len(instance.disks)
   disk_sizes = [disk.size for disk in instance.disks]
   nic_count = len(instance.nics)
+  disk_template = instance.disk_template
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
 
 
 def _ComputeIPolicyInstanceSpecViolation(
-  ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
+  ipolicy, instance_spec, disk_template,
+  _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance specs meets the specs of ipolicy.
 
   @type ipolicy: dict
   @param ipolicy: The ipolicy to verify against
   @type instance_spec: dict
   @param instance_spec: The instance spec to verify
+  @type disk_template: string
+  @param disk_template: The disk template of the instance
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
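
Switching from instance.beparams.get(...) to FillBE in _ComputeIPolicyInstanceViolation is a behavioural fix: instance.beparams holds only explicit per-instance overrides, so unset parameters used to reach the policy check as None instead of the effective cluster default. A rough sketch of the assumed fill semantics:

    # Assumed semantics of cluster.FillBE(instance): cluster-wide defaults
    # overlaid with the instance's explicit overrides.

    cluster_beparams = {"maxmem": 4096, "vcpus": 2, "spindle_use": 1}
    instance_beparams = {"vcpus": 8}        # only the override is stored

    def fill_be(cluster_defaults, overrides):
        filled = dict(cluster_defaults)
        filled.update(overrides)
        return filled

    be_full = fill_be(cluster_beparams, instance_beparams)
    assert be_full["maxmem"] == 4096   # .get() on the instance gave None here
    assert be_full["vcpus"] == 8       # the instance override wins
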
@@ -1311,11 +1328,11 @@ def _ComputeIPolicyInstanceSpecViolation(
   spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
 
 
 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
-                                 target_group,
+                                 target_group, cfg,
                                  _compute_fn=_ComputeIPolicyInstanceViolation):
   """Compute if instance meets the specs of the new target group.
 
@@ -1323,6 +1340,8 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   @param instance: The instance object to verify
   @param current_group: The current group of the instance
   @param target_group: The new group of the instance
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1330,23 +1349,25 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   if current_group == target_group:
     return []
   else:
-    return _compute_fn(ipolicy, instance)
+    return _compute_fn(ipolicy, instance, cfg)
 
 
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
+def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                             _compute_fn=_ComputeIPolicyNodeViolation):
   """Checks that the target node is correct in terms of instance policy.
 
   @param ipolicy: The ipolicy to verify
   @param instance: The instance object to verify
   @param node: The new node to relocate
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param ignore: Ignore violations of the ipolicy
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
   primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
-  res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
+  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)
 
   if res:
     msg = ("Instance does not meet target node group's (%s) instance"
@@ -1357,18 +1378,20 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
-def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
+def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
   """Computes a set of any instances that would violate the new ipolicy.
 
   @param old_ipolicy: The current (still in-place) ipolicy
   @param new_ipolicy: The new (to become) ipolicy
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A set of instances which violate the new ipolicy but
       did not before
 
   """
-  return (_ComputeViolatingInstances(new_ipolicy, instances) -
-          _ComputeViolatingInstances(old_ipolicy, instances))
+  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
+          _ComputeViolatingInstances(old_ipolicy, instances, cfg))
 
 
 def _ExpandItemName(fn, name, kind):
@@ -1490,7 +1513,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_STATUS": status,
     "INSTANCE_MINMEM": minmem,
     "INSTANCE_MAXMEM": maxmem,
-    # TODO(2.7) remove deprecated "memory" value
+    # TODO(2.9) remove deprecated "memory" value
     "INSTANCE_MEMORY": maxmem,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
@@ -1512,7 +1535,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
         # FIXME: broken network reference: the instance NIC specifies a
         # network, but the relevant network entry was not in the config. This
         # should be made impossible.
-        env["INSTANCE_NIC%d_NETWORK" % idx] = net
+        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
       if mode == constants.NIC_MODE_BRIDGED:
         env["INSTANCE_NIC%d_BRIDGE" % idx] = link
   else:
@@ -1551,20 +1574,15 @@ def _NICToTuple(lu, nic):
   @param nic: nic to convert to hooks tuple
 
   """
-  ip = nic.ip
-  mac = nic.mac
   cluster = lu.cfg.GetClusterInfo()
   filled_params = cluster.SimpleFillNIC(nic.nicparams)
   mode = filled_params[constants.NIC_MODE]
   link = filled_params[constants.NIC_LINK]
-  net = nic.network
   netinfo = None
-  if net:
-    net_uuid = lu.cfg.LookupNetwork(net)
-    if net_uuid:
-      nobj = lu.cfg.GetNetwork(net_uuid)
-      netinfo = objects.Network.ToDict(nobj)
-  return (ip, mac, mode, link, net, netinfo)
+  if nic.network:
+    nobj = lu.cfg.GetNetwork(nic.network)
+    netinfo = objects.Network.ToDict(nobj)
+  return (nic.ip, nic.mac, mode, link, nic.network, netinfo)
 
 
 def _NICListToTuple(lu, nics):
@@ -1652,17 +1670,19 @@ def _DecideSelfPromotion(lu, exceptions=None):
   return mc_now < mc_should
 
 
-def _ComputeViolatingInstances(ipolicy, instances):
+def _ComputeViolatingInstances(ipolicy, instances, cfg):
   """Computes a set of instances who violates given ipolicy.
 
   @param ipolicy: The ipolicy to verify
-  @type instances: object.Instance
+  @type instances: L{objects.Instance}
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A frozenset of instance names violating the ipolicy
 
   """
   return frozenset([inst.name for inst in instances
-                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
+                    if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
 
 
 def _CheckNicsBridgesExist(lu, target_nics, target_node):
@@ -2631,7 +2651,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     cluster = self.cfg.GetClusterInfo()
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             self.group_info)
-    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config)
+    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
     _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
              code=self.ETYPE_WARNING)
 
@@ -4271,7 +4291,7 @@ class LUClusterSetParams(LogicalUnit):
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = _ComputeNewInstanceViolations(ipol,
-                                            new_ipolicy, instances)
+                                            new_ipolicy, instances, self.cfg)
         if new:
           violations.update(new)
 
@@ -5756,6 +5776,7 @@ class _InstanceQuery(_QueryBase):
       lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
       lu.needed_locks[locking.LEVEL_NODEGROUP] = []
       lu.needed_locks[locking.LEVEL_NODE] = []
+      lu.needed_locks[locking.LEVEL_NETWORK] = []
       lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
     self.do_grouplocks = (self.do_locking and
@@ -5775,6 +5796,12 @@ class _InstanceQuery(_QueryBase):
       elif level == locking.LEVEL_NODE:
         lu._LockInstancesNodes() # pylint: disable=W0212
 
+      elif level == locking.LEVEL_NETWORK:
+        lu.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
+
   @staticmethod
   def _CheckGroupLocks(lu):
     owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
@@ -5865,10 +5892,17 @@ class _InstanceQuery(_QueryBase):
       nodes = None
       groups = None
 
+    if query.IQ_NETWORKS in self.requested_data:
+      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
+                                    for i in instance_list))
+      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
+    else:
+      networks = None
+
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_nodes, bad_nodes,
                                    live_data, wrongnode_inst, consinfo,
-                                   nodes, groups)
+                                   nodes, groups, networks)
 
 
 class LUQuery(NoHooksLU):
@@ -8128,8 +8162,8 @@ def _ExpandNamesForMigration(lu):
   lu.needed_locks[locking.LEVEL_NODE_RES] = []
   lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
 
-  # The node allocation lock is actually only needed for replicated instances
-  # (e.g. DRBD8) and if an iallocator is used.
+  # The node allocation lock is actually only needed for externally replicated
+  # instances (e.g. sharedfile or RBD) and if an iallocator is used.
   lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
 
 
@@ -8354,6 +8388,10 @@ class LUInstanceMove(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    if instance.disk_template not in constants.DTS_COPYABLE:
+      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
+                                 instance.disk_template, errors.ECODE_STATE)
+
     node = self.cfg.GetNodeInfo(self.op.target_node)
     assert node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node
@@ -8378,7 +8416,7 @@ class LUInstanceMove(LogicalUnit):
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
+    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                             ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
@@ -8429,12 +8467,9 @@ class LUInstanceMove(LogicalUnit):
     try:
       _CreateDisks(self, instance, target_node=target_node)
     except errors.OpExecError:
-      self.LogWarning("Device creation failed, reverting...")
-      try:
-        _RemoveDisks(self, instance, target_node=target_node)
-      finally:
-        self.cfg.ReleaseDRBDMinors(instance.name)
-        raise
+      self.LogWarning("Device creation failed")
+      self.cfg.ReleaseDRBDMinors(instance.name)
+      raise
 
     cluster_name = self.cfg.GetClusterInfo().cluster_name
 
@@ -8637,11 +8672,10 @@ class TLMigrateInstance(Tasklet):
                                  errors.ECODE_STATE)
 
     if instance.disk_template in constants.DTS_EXT_MIRROR:
-      assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-
       _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
 
       if self.lu.op.iallocator:
+        assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
         self._RunAllocator()
       else:
        # We set self.target_node as it is required by
@@ -8653,7 +8687,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
       # self.target_node is already populated, either directly or by the
@@ -8697,7 +8731,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
     i_be = cluster.FillBE(instance)
@@ -9306,20 +9340,34 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
   @type excl_stor: boolean
   @param excl_stor: Whether exclusive_storage is active for the node
 
+  @return: list of created devices
   """
-  if device.CreateOnSecondary():
-    force_create = True
+  created_devices = []
+  try:
+    if device.CreateOnSecondary():
+      force_create = True
 
-  if device.children:
-    for child in device.children:
-      _CreateBlockDevInner(lu, node, instance, child, force_create,
-                           info, force_open, excl_stor)
+    if device.children:
+      for child in device.children:
+        devs = _CreateBlockDevInner(lu, node, instance, child, force_create,
+                                    info, force_open, excl_stor)
+        created_devices.extend(devs)
 
-  if not force_create:
-    return
+    if not force_create:
+      return created_devices
 
-  _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
-                        excl_stor)
+    _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                          excl_stor)
+    # The device has been completely created, so there is no point in keeping
+    # its subdevices in the list. We just add the device itself instead.
+    created_devices = [(node, device)]
+    return created_devices
+
+  except errors.DeviceCreationError, e:
+    e.created_devices.extend(created_devices)
+    raise e
+  except errors.OpExecError, e:
+    raise errors.DeviceCreationError(str(e), created_devices)
 
 
 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
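
This is the heart of the commit: _CreateBlockDevInner no longer loses track of children that were created before a later step failed. Any OpExecError is wrapped into a DeviceCreationError carrying the (node, device) pairs created so far, and each recursion level folds its own successes into an error passing through. A runnable sketch of the scheme; the exception classes below are illustrative stand-ins, not Ganeti's actual definitions:

    class OpExecError(Exception):
        pass

    class DeviceCreationError(OpExecError):
        def __init__(self, message, created_devices):
            OpExecError.__init__(self, message)
            self.created_devices = created_devices

    def create_blockdev(node, device, create_fn):
        """Create device and its children, recursively; on failure raise
        an error carrying every (node, device) pair created so far."""
        created = []
        try:
            for child in device.children:
                created.extend(create_blockdev(node, child, create_fn))
            create_fn(node, device)
            # Parent is complete; its children need no separate cleanup.
            return [(node, device)]
        except DeviceCreationError as err:
            err.created_devices.extend(created)
            raise
        except OpExecError as err:
            raise DeviceCreationError(str(err), created)

    class Dev(object):
        def __init__(self, name, children=()):
            self.name, self.children = name, list(children)

    def flaky_create(node, device):
        if device.name == "bad":
            raise OpExecError("creating %s failed" % device.name)

    try:
        create_blockdev("node1", Dev("parent", [Dev("ok"), Dev("bad")]),
                        flaky_create)
    except DeviceCreationError as err:
        assert [d.name for (_, d) in err.created_devices] == ["ok"]
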
@@ -9666,6 +9714,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     result.Raise("Failed to create directory '%s' on"
                  " node %s" % (file_storage_dir, pnode))
 
+  disks_created = []
   # Note: this needs to be kept in sync with adding of disks in
   # LUInstanceSetParams
   for idx, device in enumerate(instance.disks):
@@ -9675,7 +9724,23 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     #HARDCODE
     for node in all_nodes:
       f_create = node == pnode
-      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+      try:
+        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+        disks_created.append((node, device))
+      except errors.OpExecError:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+      except errors.DeviceCreationError, e:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+        disks_created.extend(e.created_devices)
+        for (node, disk) in disks_created:
+          lu.cfg.SetDiskID(disk, node)
+          result = lu.rpc.call_blockdev_remove(node, disk)
+          if result.fail_msg:
+            logging.warning("Failed to remove newly-created disk %s on node %s:"
+                            " %s", device, node, result.fail_msg)
+        raise errors.OpExecError(e.message)
 
 
 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
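
_CreateDisks is the consumer of that error: it records every fully created disk, extends the list with the partial results carried by DeviceCreationError, and removes everything before re-raising, so a failed creation no longer leaves orphaned block devices behind. A condensed sketch, with create_fn/remove_fn standing in for the RPC calls:

    import logging

    def create_disks(pairs, create_fn, remove_fn):
        """Create each (node, device) pair; on any failure, remove every
        device created so far (including the partial results carried by
        the error) and re-raise."""
        created = []
        try:
            for node, device in pairs:
                create_fn(node, device)
                created.append((node, device))
        except Exception as err:
            created.extend(getattr(err, "created_devices", []))
            for node, device in created:
                try:
                    remove_fn(node, device)
                except Exception:
                    logging.warning("Failed to remove newly-created disk %s"
                                    " on node %s", device, node)
            raise
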
@@ -9683,8 +9748,7 @@ def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be removed, the removal will continue with the other ones (compare
-  with `_CreateDisks()`).
+  be removed, the removal will continue with the other ones.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
@@ -9954,8 +10018,9 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
 
     check_params = cluster.SimpleFillNIC(nicparams)
     objects.NIC.CheckParameterSyntax(check_params)
+    net_uuid = cfg.LookupNetwork(net)
     nics.append(objects.NIC(mac=mac, ip=nic_ip,
-                            network=net, nicparams=nicparams))
+                            network=net_uuid, nicparams=nicparams))
 
   return nics
 
@@ -10691,14 +10756,15 @@ class LUInstanceCreate(LogicalUnit):
     # Fill in any IPs from IP pools. This must happen here, because we need to
     # know the nic's primary node, as specified by the iallocator
     for idx, nic in enumerate(self.nics):
-      net = nic.network
-      if net is not None:
-        netparams = self.cfg.GetGroupNetParams(net, self.pnode.name)
+      net_uuid = nic.network
+      if net_uuid is not None:
+        nobj = self.cfg.GetNetwork(net_uuid)
+        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
         if netparams is None:
           raise errors.OpPrereqError("No netparams found for network"
                                      " %s. Propably not connected to"
                                      " node's %s nodegroup" %
-                                     (net, self.pnode.name),
+                                     (nobj.name, self.pnode.name),
                                      errors.ECODE_INVAL)
         self.LogInfo("NIC/%d inherits netparams %s" %
                      (idx, netparams.values()))
@@ -10706,19 +10772,19 @@ class LUInstanceCreate(LogicalUnit):
         if nic.ip is not None:
           if nic.ip.lower() == constants.NIC_IP_POOL:
             try:
-              nic.ip = self.cfg.GenerateIp(net, self.proc.GetECId())
+              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                          " from the address pool" % idx,
                                          errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from network %s", nic.ip, net)
+            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
           else:
             try:
-              self.cfg.ReserveIp(net, nic.ip, self.proc.GetECId())
+              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("IP address %s already in use"
                                          " or does not belong to network %s" %
-                                         (nic.ip, net),
+                                         (nic.ip, nobj.name),
                                          errors.ECODE_NOTUNIQUE)
 
       # net is None, ip None or given
@@ -10754,25 +10820,6 @@ class LUInstanceCreate(LogicalUnit):
 
     nodenames = [pnode.name] + self.secondaries
 
-    # Verify instance specs
-    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
-    ispec = {
-      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
-      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
-      constants.ISPEC_DISK_COUNT: len(self.disks),
-      constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
-      constants.ISPEC_NIC_COUNT: len(self.nics),
-      constants.ISPEC_SPINDLE_USE: spindle_use,
-      }
-
-    group_info = self.cfg.GetNodeGroup(pnode.group)
-    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
-    if not self.op.ignore_ipolicy and res:
-      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
-             (pnode.group, group_info.name, utils.CommaJoin(res)))
-      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
     if not self.adopt_disks:
       if self.op.disk_template == constants.DT_RBD:
         # _CheckRADOSFreeSpace() is just a placeholder.
@@ -10871,12 +10918,12 @@ class LUInstanceCreate(LogicalUnit):
 
     group_info = self.cfg.GetNodeGroup(pnode.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
+    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
+                                               self.op.disk_template)
     if not self.op.ignore_ipolicy and res:
-      raise errors.OpPrereqError(("Instance allocation to group %s violates"
-                                  " policy: %s") % (pnode.group,
-                                                    utils.CommaJoin(res)),
-                                  errors.ECODE_INVAL)
+      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
+             (pnode.group, group_info.name, utils.CommaJoin(res)))
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
     _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
@@ -10967,12 +11014,9 @@ class LUInstanceCreate(LogicalUnit):
       try:
         _CreateDisks(self, iobj)
       except errors.OpExecError:
-        self.LogWarning("Device creation failed, reverting...")
-        try:
-          _RemoveDisks(self, iobj)
-        finally:
-          self.cfg.ReleaseDRBDMinors(instance)
-          raise
+        self.LogWarning("Device creation failed")
+        self.cfg.ReleaseDRBDMinors(instance)
+        raise
 
     feedback_fn("adding instance %s to cluster config" % instance)
 
@@ -11723,7 +11767,7 @@ class TLReplaceDisks(Tasklet):
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               new_group_info)
       _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
-                              ignore=self.ignore_ipolicy)
+                              self.cfg, ignore=self.ignore_ipolicy)
 
     for node in check_nodes:
       _CheckNodeOnline(self.lu, node)
@@ -12819,12 +12863,13 @@ class LUInstanceQueryData(NoHooksLU):
 
       self.needed_locks[locking.LEVEL_NODEGROUP] = []
       self.needed_locks[locking.LEVEL_NODE] = []
+      self.needed_locks[locking.LEVEL_NETWORK] = []
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if self.op.use_locking:
+      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
       if level == locking.LEVEL_NODEGROUP:
-        owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
 
         # Lock all groups used by instances optimistically; this requires going
         # via the node before it's locked, requiring verification later on
@@ -12837,6 +12882,13 @@ class LUInstanceQueryData(NoHooksLU):
       elif level == locking.LEVEL_NODE:
         self._LockInstancesNodes()
 
+      elif level == locking.LEVEL_NETWORK:
+        self.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in owned_instances
+                    for net_uuid in
+                       self.cfg.GetInstanceNetworks(instance_name))
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -12846,6 +12898,7 @@ class LUInstanceQueryData(NoHooksLU):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
 
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
@@ -12857,7 +12910,8 @@ class LUInstanceQueryData(NoHooksLU):
       _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                                 None)
     else:
-      assert not (owned_instances or owned_groups or owned_nodes)
+      assert not (owned_instances or owned_groups or
+                  owned_nodes or owned_networks)
 
     self.wanted_instances = instances.values()
 
@@ -12941,7 +12995,6 @@ class LUInstanceQueryData(NoHooksLU):
                                                  for node in nodes.values()))
 
     group2name_fn = lambda uuid: groups[uuid].name
-
     for instance in self.wanted_instances:
       pnode = nodes[instance.primary_node]
 
@@ -13385,7 +13438,7 @@ class LUInstanceSetParams(LogicalUnit):
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return (nl, nl)
 
-  def _PrepareNicModification(self, params, private, old_ip, old_net,
+  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                               old_params, cluster, pnode):
 
     update_params_dict = dict([(key, params[key])
@@ -13395,13 +13448,21 @@ class LUInstanceSetParams(LogicalUnit):
     req_link = update_params_dict.get(constants.NIC_LINK, None)
     req_mode = update_params_dict.get(constants.NIC_MODE, None)
 
-    new_net = params.get(constants.INIC_NETWORK, old_net)
-    if new_net is not None:
-      netparams = self.cfg.GetGroupNetParams(new_net, pnode)
-      if netparams is None:
+    new_net_uuid = None
+    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
+    if new_net_uuid_or_name:
+      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
+      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
+
+    if old_net_uuid:
+      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
+
+    if new_net_uuid:
+      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
+      if not netparams:
         raise errors.OpPrereqError("No netparams found for the network"
-                                   " %s, probably not connected" % new_net,
-                                   errors.ECODE_INVAL)
+                                   " %s, probably not connected" %
+                                   new_net_obj.name, errors.ECODE_INVAL)
       new_params = dict(netparams)
     else:
       new_params = _GetUpdatedParams(old_params, update_params_dict)
@@ -13440,7 +13501,7 @@ class LUInstanceSetParams(LogicalUnit):
       elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
         # otherwise generate the MAC address
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
       else:
         # or validate/reserve the current one
         try:
@@ -13449,62 +13510,66 @@ class LUInstanceSetParams(LogicalUnit):
           raise errors.OpPrereqError("MAC address '%s' already in use"
                                      " in cluster" % mac,
                                      errors.ECODE_NOTUNIQUE)
-    elif new_net != old_net:
-
-      def get_net_prefix(net):
-        if net:
-          uuid = self.cfg.LookupNetwork(net)
-          if uuid:
-            nobj = self.cfg.GetNetwork(uuid)
-            return nobj.mac_prefix
-        return None
-
-      new_prefix = get_net_prefix(new_net)
-      old_prefix = get_net_prefix(old_net)
+    elif new_net_uuid != old_net_uuid:
+
+      def get_net_prefix(net_uuid):
+        mac_prefix = None
+        if net_uuid:
+          nobj = self.cfg.GetNetwork(net_uuid)
+          mac_prefix = nobj.mac_prefix
+
+        return mac_prefix
+
+      new_prefix = get_net_prefix(new_net_uuid)
+      old_prefix = get_net_prefix(old_net_uuid)
       if old_prefix != new_prefix:
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
 
-    #if there is a change in nic-network configuration
+    # if there is a change in (ip, network) tuple
     new_ip = params.get(constants.INIC_IP, old_ip)
-    if (new_ip, new_net) != (old_ip, old_net):
+    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
       if new_ip:
-        if new_net:
-          if new_ip.lower() == constants.NIC_IP_POOL:
+        # if IP is pool then require a network and generate one IP
+        if new_ip.lower() == constants.NIC_IP_POOL:
+          if new_net_uuid:
             try:
-              new_ip = self.cfg.GenerateIp(new_net, self.proc.GetECId())
+              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("Unable to get a free IP"
                                          " from the address pool",
                                          errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from pool %s", new_ip, new_net)
+            self.LogInfo("Chose IP %s from network %s",
+                         new_ip,
+                         new_net_obj.name)
             params[constants.INIC_IP] = new_ip
-          elif new_ip != old_ip or new_net != old_net:
-            try:
-              self.LogInfo("Reserving IP %s in pool %s", new_ip, new_net)
-              self.cfg.ReserveIp(new_net, new_ip, self.proc.GetECId())
-            except errors.ReservationError:
-              raise errors.OpPrereqError("IP %s not available in network %s" %
-                                         (new_ip, new_net),
-                                         errors.ECODE_NOTUNIQUE)
-        elif new_ip.lower() == constants.NIC_IP_POOL:
-          raise errors.OpPrereqError("ip=pool, but no network found",
-                                     errors.ECODE_INVAL)
-
-        # new net is None
+          else:
+            raise errors.OpPrereqError("ip=pool, but no network found",
+                                       errors.ECODE_INVAL)
+        # Otherwise, reserve the new IP in the new network, if there is one
+        elif new_net_uuid:
+          try:
+            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
+            self.LogInfo("Reserving IP %s in network %s",
+                         new_ip, new_net_obj.name)
+          except errors.ReservationError:
+            raise errors.OpPrereqError("IP %s not available in network %s" %
+                                       (new_ip, new_net_obj.name),
+                                       errors.ECODE_NOTUNIQUE)
+        # The new network is None, so check whether the new IP conflicts
         elif self.op.conflicts_check:
           _CheckForConflictingIp(self, new_ip, pnode)
 
-      if old_ip:
-        if old_net:
-          try:
-            self.cfg.ReleaseIp(old_net, old_ip, self.proc.GetECId())
-          except errors.AddressPoolError:
-            logging.warning("Release IP %s not contained in network %s",
-                            old_ip, old_net)
+      # release old IP if old network is not None
+      if old_ip and old_net_uuid:
+        try:
+          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
+        except errors.AddressPoolError:
+          logging.warning("Release IP %s not contained in network %s",
+                          old_ip, old_net_obj.name)
 
-    # there are no changes in (net, ip) tuple
-    elif (old_net is not None and
+    # there are no changes in (ip, network) tuple and old network is not None
+    elif (old_net_uuid is not None and
           (req_link is not None or req_mode is not None)):
       raise errors.OpPrereqError("Not allowed to change link or mode of"
                                  " a NIC that is connected to a network",
@@ -13550,7 +13615,7 @@ class LUInstanceSetParams(LogicalUnit):
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               snode_group)
-      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                               ignore=self.op.ignore_ipolicy)
       if pnode_info.group != snode_info.group:
         self.LogWarning("The primary and secondary nodes are in two"
@@ -13877,14 +13942,20 @@ class LUInstanceSetParams(LogicalUnit):
                                                          None)
 
       # Copy ispec to verify parameters with min/max values separately
+      if self.op.disk_template:
+        new_disk_template = self.op.disk_template
+      else:
+        new_disk_template = instance.disk_template
       ispec_max = ispec.copy()
       ispec_max[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MAXMEM, None)
-      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max)
+      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
+                                                     new_disk_template)
       ispec_min = ispec.copy()
       ispec_min[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MINMEM, None)
-      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min)
+      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
+                                                     new_disk_template)
 
       if (res_max or res_min):
         # FIXME: Improve error message by including information about whether
@@ -13990,6 +14061,7 @@ class LUInstanceSetParams(LogicalUnit):
     # update instance structure
     instance.disks = new_disks
     instance.disk_template = constants.DT_PLAIN
+    _UpdateIvNames(0, instance.disks)
     self.cfg.Update(instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
@@ -14076,18 +14148,19 @@ class LUInstanceSetParams(LogicalUnit):
     if root.dev_type in constants.LDS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
-  @staticmethod
-  def _CreateNewNic(idx, params, private):
+  def _CreateNewNic(self, idx, params, private):
     """Creates data structure for a new network interface.
 
     """
     mac = params[constants.INIC_MAC]
     ip = params.get(constants.INIC_IP, None)
     net = params.get(constants.INIC_NETWORK, None)
+    net_uuid = self.cfg.LookupNetwork(net)
     #TODO: not private.filled?? can a nic have no nicparams??
     nicparams = private.filled
+    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, nicparams=nicparams)
 
-    return (objects.NIC(mac=mac, ip=ip, network=net, nicparams=nicparams), [
+    return (nobj, [
       ("nic.%d" % idx,
        "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
        (mac, ip, private.filled[constants.NIC_MODE],
@@ -14095,18 +14168,23 @@ class LUInstanceSetParams(LogicalUnit):
        net)),
       ])
 
-  @staticmethod
-  def _ApplyNicMods(idx, nic, params, private):
+  def _ApplyNicMods(self, idx, nic, params, private):
     """Modifies a network interface.
 
     """
     changes = []
 
-    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
+    for key in [constants.INIC_MAC, constants.INIC_IP]:
       if key in params:
         changes.append(("nic.%s/%d" % (key, idx), params[key]))
         setattr(nic, key, params[key])
 
+    new_net = params.get(constants.INIC_NETWORK, nic.network)
+    new_net_uuid = self.cfg.LookupNetwork(new_net)
+    if new_net_uuid != nic.network:
+      changes.append(("nic.network/%d" % idx, new_net))
+      nic.network = new_net_uuid
+
     if private.filled:
       nic.nicparams = private.filled
 
@@ -15310,7 +15388,7 @@ class LUGroupSetParams(LogicalUnit):
       violations = \
           _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                   self.group),
-                                        new_ipolicy, instances)
+                                        new_ipolicy, instances, self.cfg)
 
       if violations:
         self.LogWarning("After the ipolicy change the following instances"
@@ -16130,7 +16208,8 @@ class LUTestAllocator(NoHooksLU):
                                           nics=self.op.nics,
                                           vcpus=self.op.vcpus,
                                           spindle_use=self.op.spindle_use,
-                                          hypervisor=self.op.hypervisor)
+                                          hypervisor=self.op.hypervisor,
+                                          node_whitelist=None)
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
       req = iallocator.IAReqRelocate(name=self.op.name,
                                      relocate_from=list(self.relocate_from))
@@ -16207,11 +16286,15 @@ class LUNetworkAdd(LogicalUnit):
       raise errors.OpPrereqError("Network must be given",
                                  errors.ECODE_INVAL)
 
-    uuid = self.cfg.LookupNetwork(self.op.network_name)
-
-    if uuid:
-      raise errors.OpPrereqError(("Network with name '%s' already exists" %
-                                  self.op.network_name), errors.ECODE_EXISTS)
+    try:
+      existing_uuid = self.cfg.LookupNetwork(self.op.network_name)
+    except errors.OpPrereqError:
+      pass
+    else:
+      raise errors.OpPrereqError("Desired network name '%s' already exists as a"
+                                 " network (UUID: %s)" %
+                                 (self.op.network_name, existing_uuid),
+                                 errors.ECODE_EXISTS)
 
     # Check tag validity
     for tag in self.op.tags:
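
The try/except/else shape above relies on cfg.LookupNetwork now raising OpPrereqError for unknown networks instead of returning None (see the removed "if uuid:" checks elsewhere in this diff), so a lookup that succeeds is the duplicate-name error case. A minimal stand-alone sketch:

    class OpPrereqError(Exception):
        pass

    existing = {"prodnet": "net-uuid-1"}

    def lookup_network(name):
        # Stand-in for the new cfg.LookupNetwork: raises instead of None
        try:
            return existing[name]
        except KeyError:
            raise OpPrereqError("Network '%s' not found" % name)

    def check_name_is_free(name):
        try:
            uuid = lookup_network(name)
        except OpPrereqError:
            pass                        # good: the name is free
        else:
            raise OpPrereqError("Network name '%s' already exists"
                                " (UUID: %s)" % (name, uuid))

    check_name_is_free("testnet")       # passes silently
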
@@ -16246,8 +16329,9 @@ class LUNetworkAdd(LogicalUnit):
     # Initialize the associated address pool
     try:
       pool = network.AddressPool.InitializeNetwork(nobj)
-    except errors.AddressPoolError, e:
-      raise errors.OpExecError("Cannot create IP pool for this network: %s" % e)
+    except errors.AddressPoolError, err:
+      raise errors.OpExecError("Cannot create IP address pool for network"
+                               " '%s': %s" % (self.op.network_name, err))
 
     # Check if we need to reserve the nodes and the cluster master IP
     # These may not be allocated to any instances in routed mode, as
@@ -16260,25 +16344,26 @@ class LUNetworkAdd(LogicalUnit):
               pool.Reserve(ip)
               self.LogInfo("Reserved IP address of node '%s' (%s)",
                            node.name, ip)
-          except errors.AddressPoolError:
-            self.LogWarning("Cannot reserve IP address of node '%s' (%s)",
-                            node.name, ip)
+          except errors.AddressPoolError, err:
+            self.LogWarning("Cannot reserve IP address '%s' of node '%s': %s",
+                            ip, node.name, err)
 
       master_ip = self.cfg.GetClusterInfo().master_ip
       try:
         if pool.Contains(master_ip):
           pool.Reserve(master_ip)
           self.LogInfo("Reserved cluster master IP address (%s)", master_ip)
-      except errors.AddressPoolError:
-        self.LogWarning("Cannot reserve cluster master IP address (%s)",
-                        master_ip)
+      except errors.AddressPoolError, err:
+        self.LogWarning("Cannot reserve cluster master IP address (%s): %s",
+                        master_ip, err)
 
     if self.op.add_reserved_ips:
       for ip in self.op.add_reserved_ips:
         try:
           pool.Reserve(ip, external=True)
-        except errors.AddressPoolError, e:
-          raise errors.OpExecError("Cannot reserve IP %s. %s " % (ip, e))
+        except errors.AddressPoolError, err:
+          raise errors.OpExecError("Cannot reserve IP address '%s': %s" %
+                                   (ip, err))
 
     if self.op.tags:
       for tag in self.op.tags:
@@ -16296,10 +16381,6 @@ class LUNetworkRemove(LogicalUnit):
   def ExpandNames(self):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
 
-    if not self.network_uuid:
-      raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name), errors.ECODE_NOENT)
-
     self.share_locks[locking.LEVEL_NODEGROUP] = 1
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
@@ -16368,9 +16449,6 @@ class LUNetworkSetParams(LogicalUnit):
 
   def ExpandNames(self):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name), errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
@@ -16540,8 +16618,6 @@ class _NetworkQuery(_QueryBase):
     network_uuids = self._GetNames(lu, all_networks.keys(),
                                    locking.LEVEL_NETWORK)
 
-    name_to_uuid = dict((n.name, n.uuid) for n in all_networks.values())
-
     do_instances = query.NETQ_INST in self.requested_data
     do_groups = query.NETQ_GROUP in self.requested_data
 
@@ -16566,10 +16642,8 @@ class _NetworkQuery(_QueryBase):
       network_to_instances = dict((uuid, []) for uuid in network_uuids)
       for instance in all_instances.values():
         for nic in instance.nics:
-          if nic.network:
-            net_uuid = name_to_uuid[nic.network]
-            if net_uuid in network_uuids:
-              network_to_instances[net_uuid].append(instance.name)
+          if nic.network in network_uuids:
+            network_to_instances[nic.network].append(instance.name)
             break
 
     if query.NETQ_STATS in self.requested_data:
@@ -16632,14 +16706,7 @@ class LUNetworkConnect(LogicalUnit):
     self.network_link = self.op.network_link
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError("Network '%s' does not exist" %
-                                 self.network_name, errors.ECODE_NOENT)
-
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    if self.group_uuid is None:
-      raise errors.OpPrereqError("Group '%s' does not exist" %
-                                 self.group_name, errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16678,6 +16745,11 @@ class LUNetworkConnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    if self.op.conflicts_check:
+      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
       constants.NIC_LINK: self.network_link,
@@ -16692,23 +16764,22 @@ class LUNetworkConnect(LogicalUnit):
       self.LogWarning("Network '%s' is already mapped to group '%s'" %
                       (self.network_name, self.group.name))
       self.connected = True
-      return
 
-    if self.op.conflicts_check:
+    # Check for conflicts only if the network is not already connected
+    elif self.op.conflicts_check:
       pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
 
       _NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
-                            "connect to")
+                            "connect to", owned_instances)
 
   def Exec(self, feedback_fn):
-    if self.connected:
-      return
-
-    self.group.networks[self.network_uuid] = self.netparams
-    self.cfg.Update(self.group, feedback_fn)
+    # Connect the network and update the group only if not already connected
+    if not self.connected:
+      self.group.networks[self.network_uuid] = self.netparams
+      self.cfg.Update(self.group, feedback_fn)
 
 
-def _NetworkConflictCheck(lu, check_fn, action):
+def _NetworkConflictCheck(lu, check_fn, action, instances):
   """Checks for network interface conflicts with a network.
 
   @type lu: L{LogicalUnit}
@@ -16720,13 +16791,9 @@ def _NetworkConflictCheck(lu, check_fn, action):
   @raise errors.OpPrereqError: If conflicting IP addresses are found.
 
   """
-  # Check if locked instances are still correct
-  owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
-  _CheckNodeGroupInstances(lu.cfg, lu.group_uuid, owned_instances)
-
   conflicts = []
 
-  for (_, instance) in lu.cfg.GetMultiInstanceInfo(owned_instances):
+  for (_, instance) in lu.cfg.GetMultiInstanceInfo(instances):
     instconflicts = [(idx, nic.ip)
                      for (idx, nic) in enumerate(instance.nics)
                      if check_fn(nic)]
@@ -16768,14 +16835,7 @@ class LUNetworkDisconnect(LogicalUnit):
     self.group_name = self.op.group_name
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError("Network '%s' does not exist" %
-                                 self.network_name, errors.ECODE_NOENT)
-
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    if self.group_uuid is None:
-      raise errors.OpPrereqError("Group '%s' does not exist" %
-                                 self.group_name, errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16789,9 +16849,8 @@ class LUNetworkDisconnect(LogicalUnit):
 
       # Lock instances optimistically, needs verification once group lock has
       # been acquired
-      if self.op.conflicts_check:
-        self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetNodeGroupInstances(self.group_uuid)
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+        self.cfg.GetNodeGroupInstances(self.group_uuid)
 
   def BuildHooksEnv(self):
     ret = {
@@ -16808,24 +16867,27 @@ class LUNetworkDisconnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
     if self.network_uuid not in self.group.networks:
       self.LogWarning("Network '%s' is not mapped to group '%s'",
                       self.network_name, self.group.name)
       self.connected = False
-      return
 
-    if self.op.conflicts_check:
-      _NetworkConflictCheck(self, lambda nic: nic.network == self.network_name,
-                            "disconnect from")
+    # We need this check only if the network is actually connected
+    else:
+      _NetworkConflictCheck(self, lambda nic: nic.network == self.network_uuid,
+                            "disconnect from", owned_instances)
 
   def Exec(self, feedback_fn):
-    if not self.connected:
-      return
-
-    del self.group.networks[self.network_uuid]
-    self.cfg.Update(self.group, feedback_fn)
+    # Disconnect the network and update the group only if network is connected
+    if self.connected:
+      del self.group.networks[self.network_uuid]
+      self.cfg.Update(self.group, feedback_fn)
 
 
 #: Query type implementations
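
Finally, because NICs now store network UUIDs, the disconnect-time conflict check above becomes a plain UUID comparison (nic.network == self.network_uuid) instead of the old name lookup. Sketched with dicts standing in for instance and NIC objects:

    def conflicting_nics(instances, net_uuid):
        return [(inst["name"], idx)
                for inst in instances
                for idx, nic in enumerate(inst["nics"])
                if nic["network"] == net_uuid]

    insts = [{"name": "inst1", "nics": [{"network": "net-uuid-1"}]},
             {"name": "inst2", "nics": [{"network": None}]}]
    assert conflicting_nics(insts, "net-uuid-1") == [("inst1", 0)]
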