Refactor _GetPCIInfo() in cmdlib
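
Besides the _GetPCIInfo() refactoring, the patch below adds the `ext' disk template (with its _ExtStorageQuery/LUExtStorageDiagnose logical units), network-aware hook environments, and the per-instance bookkeeping used to pick a device index and a PCI slot when a disk or NIC is hotplugged. The allocation scheme is small: each instance carries a HotplugInfo object with one counter per device type and a pool of free PCI slots (16-31 at creation time), and _GetPCIInfo() pops a slot and bumps the matching counter. A minimal standalone sketch of that scheme follows (illustrative only, not Ganeti code; the HotplugInfo shape mirrors objects.HotplugInfo from the patch):

class HotplugInfo(object):
  """Per-instance hotplug bookkeeping, mirroring objects.HotplugInfo."""
  def __init__(self, disks=0, nics=0, pci_pool=None):
    self.disks = disks              # index to give to the next hotplugged disk
    self.nics = nics                # index to give to the next hotplugged NIC
    self.pci_pool = pci_pool or []  # free PCI slots of this instance

def allocate(info, dev_type):
  """Return (device index, PCI slot) and advance the counters."""
  idx = getattr(info, dev_type)
  setattr(info, dev_type, idx + 1)
  pci = info.pci_pool.pop()
  return idx, pci

info = HotplugInfo(pci_pool=list(range(16, 32)))  # as set up by LUInstanceCreate
print(allocate(info, "disks"))  # first disk  -> (0, 31)
print(allocate(info, "nics"))   # first NIC   -> (0, 30)
print(allocate(info, "disks"))  # second disk -> (1, 29)
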
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index ad04acf..e4491ba 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1313,6 +1313,42 @@ def _ExpandInstanceName(cfg, name):
   """Wrapper over L{_ExpandItemName} for instance."""
   return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
 
+
+def _BuildNetworkHookEnv(name, network, gateway, network6, gateway6,
+                         network_type, mac_prefix, tags):
+  """Builds network related env variables for hooks.
+
+  """
+  env = dict()
+  if name:
+    env["NETWORK_NAME"] = name
+  if network:
+    env["NETWORK_SUBNET"] = network
+  if gateway:
+    env["NETWORK_GATEWAY"] = gateway
+  if network6:
+    env["NETWORK_SUBNET6"] = network6
+  if gateway6:
+    env["NETWORK_GATEWAY6"] = gateway6
+  if mac_prefix:
+    env["NETWORK_MAC_PREFIX"] = mac_prefix
+  if network_type:
+    env["NETWORK_TYPE"] = network_type
+  if tags:
+    env["NETWORK_TAGS"] = " ".join(tags)
+
+  return env
+
+
+def _BuildNetworkHookEnvByObject(lu, network):
+  """Builds network related env variables for hooks from a Network object.
+
+  """
+  args = {
+    "name": network.name,
+    "network": network.network,
+    "gateway": network.gateway,
+    "network6": network.network6,
+    "gateway6": network.gateway6,
+    "network_type": network.network_type,
+    "mac_prefix": network.mac_prefix,
+    "tags" : network.tags,
+  }
+  return _BuildNetworkHookEnv(**args)
+
 
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                           minmem, maxmem, vcpus, nics, disk_template, disks,
@@ -1373,14 +1409,31 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   }
   if nics:
     nic_count = len(nics)
-    for idx, (ip, mac, mode, link, network) in enumerate(nics):
+    for idx, (ip, mac, mode, link, network, netinfo) in enumerate(nics):
       if ip is None:
         ip = ""
       env["INSTANCE_NIC%d_IP" % idx] = ip
       env["INSTANCE_NIC%d_MAC" % idx] = mac
       env["INSTANCE_NIC%d_MODE" % idx] = mode
       env["INSTANCE_NIC%d_LINK" % idx] = link
-      env["INSTANCE_NIC%d_NETWORK" % idx] = network
+      if network:
+        env["INSTANCE_NIC%d_NETWORK" % idx] = network
+        if netinfo:
+          nobj = objects.Network.FromDict(netinfo)
+          if nobj.network:
+            env["INSTANCE_NIC%d_NETWORK_SUBNET" % idx] = nobj.network
+          if nobj.gateway:
+            env["INSTANCE_NIC%d_NETWORK_GATEWAY" % idx] = nobj.gateway
+          if nobj.network6:
+            env["INSTANCE_NIC%d_NETWORK_SUBNET6" % idx] = nobj.network6
+          if nobj.gateway6:
+            env["INSTANCE_NIC%d_NETWORK_GATEWAY6" % idx] = nobj.gateway6
+          if nobj.mac_prefix:
+            env["INSTANCE_NIC%d_NETWORK_MAC_PREFIX" % idx] = nobj.mac_prefix
+          if nobj.network_type:
+            env["INSTANCE_NIC%d_NETWORK_TYPE" % idx] = nobj.network_type
+          if nobj.tags:
+            env["INSTANCE_NIC%d_NETWORK_TAGS" % idx] = " ".join(nobj.tags)
       if mode == constants.NIC_MODE_BRIDGED:
         env["INSTANCE_NIC%d_BRIDGE" % idx] = link
   else:
@@ -1409,6 +1462,29 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   return env
 
+
+def _NICToTuple(lu, nic):
+  """Build a tuple of nic information.
+
+  @type lu:  L{LogicalUnit}
+  @param lu: the logical unit on whose behalf we execute
+  @type nic: L{objects.NIC}
+  @param nic: nic to convert to hooks tuple
+
+  """
+  cluster = lu.cfg.GetClusterInfo()
+  ip = nic.ip
+  mac = nic.mac
+  filled_params = cluster.SimpleFillNIC(nic.nicparams)
+  mode = filled_params[constants.NIC_MODE]
+  link = filled_params[constants.NIC_LINK]
+  network = nic.network
+  netinfo = None
+  if network:
+    net_uuid = lu.cfg.LookupNetwork(network)
+    if net_uuid:
+      nobj = lu.cfg.GetNetwork(net_uuid)
+      netinfo = objects.Network.ToDict(nobj)
+  return (ip, mac, mode, link, network, netinfo)
+
 
 def _NICListToTuple(lu, nics):
   """Build a list of nic information tuples.
@@ -1425,16 +1501,9 @@ def _NICListToTuple(lu, nics):
   hooks_nics = []
-  cluster = lu.cfg.GetClusterInfo()
   for nic in nics:
-    ip = nic.ip
-    mac = nic.mac
-    filled_params = cluster.SimpleFillNIC(nic.nicparams)
-    mode = filled_params[constants.NIC_MODE]
-    link = filled_params[constants.NIC_LINK]
-    network = nic.network
-    hooks_nics.append((ip, mac, mode, link, network))
+    hooks_nics.append(_NICToTuple(lu, nic))
   return hooks_nics
 
 
 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   """Builds instance related env variables for hooks from an object.
 
@@ -4934,6 +5003,159 @@ class LUOsDiagnose(NoHooksLU):
     return self.oq.OldStyleQuery(self)
 
 
+class _ExtStorageQuery(_QueryBase):
+  FIELDS = query.EXTSTORAGE_FIELDS
+
+  def ExpandNames(self, lu):
+    # Lock all nodes in shared mode
+    # Temporary removal of locks, should be reverted later
+    # TODO: reintroduce locks when they are lighter-weight
+    lu.needed_locks = {}
+    #self.share_locks[locking.LEVEL_NODE] = 1
+    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+    # The following variables interact with _QueryBase._GetNames
+    if self.names:
+      self.wanted = self.names
+    else:
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = self.use_locking
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  @staticmethod
+  def _DiagnoseByProvider(rlist):
+    """Remaps a per-node return list into an a per-provider per-node dictionary
+
+    @param rlist: a map with node names as keys and ExtStorage objects as values
+
+    @rtype: dict
+    @return: a dictionary with extstorage providers as keys and as
+        value another map, with nodes as keys and tuples of
+        (path, status, diagnose, parameters) as values, eg::
+
+          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
+                         "node2": [(/srv/..., False, "missing file")]
+                         "node3": [(/srv/..., True, "", [])]
+          }
+
+    """
+    all_es = {}
+    # we build here the list of nodes that didn't fail the RPC (at RPC
+    # level), so that nodes with a non-responding node daemon don't
+    # make all providers invalid
+    good_nodes = [node_name for node_name in rlist
+                  if not rlist[node_name].fail_msg]
+    for node_name, nr in rlist.items():
+      if nr.fail_msg or not nr.payload:
+        continue
+      for (name, path, status, diagnose, params) in nr.payload:
+        if name not in all_es:
+          # build a list of nodes for this provider containing empty lists
+          # for each node in node_list
+          all_es[name] = {}
+          for nname in good_nodes:
+            all_es[name][nname] = []
+        # convert params from [name, help] to (name, help)
+        params = [tuple(v) for v in params]
+        all_es[name][node_name].append((path, status, diagnose, params))
+    return all_es
+
+  def _GetQueryData(self, lu):
+    """Computes the list of nodes and their attributes.
+
+    """
+    # Locking is not used
+    assert not (compat.any(lu.glm.is_owned(level)
+                           for level in locking.LEVELS
+                           if level != locking.LEVEL_CLUSTER) or
+                self.do_locking or self.use_locking)
+
+    valid_nodes = [node.name
+                   for node in lu.cfg.GetAllNodesInfo().values()
+                   if not node.offline and node.vm_capable]
+    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
+
+    data = {}
+
+    nodegroup_list = lu.cfg.GetNodeGroupList()
+
+    for (es_name, es_data) in pol.items():
+      # For every provider compute the nodegroup validity.
+      # To do this we need to check the validity of each node in es_data
+      # and then construct the corresponding nodegroup dict:
+      #      { nodegroup1: status
+      #        nodegroup2: status
+      #      }
+      ndgrp_data = {}
+      for nodegroup in nodegroup_list:
+        ndgrp = lu.cfg.GetNodeGroup(nodegroup)
+
+        nodegroup_nodes = ndgrp.members
+        nodegroup_name = ndgrp.name
+        node_statuses = []
+
+        for node in nodegroup_nodes:
+          if node in valid_nodes:
+            if es_data[node] != []:
+              node_status = es_data[node][0][1]
+              node_statuses.append(node_status)
+            else:
+              node_statuses.append(False)
+
+        if False in node_statuses:
+          ndgrp_data[nodegroup_name] = False
+        else:
+          ndgrp_data[nodegroup_name] = True
+
+      # Compute the provider's parameters
+      parameters = set()
+      for idx, esl in enumerate(es_data.values()):
+        valid = bool(esl and esl[0][1])
+        if not valid:
+          break
+
+        node_params = esl[0][3]
+        if idx == 0:
+          # First entry
+          parameters.update(node_params)
+        else:
+          # Filter out inconsistent values
+          parameters.intersection_update(node_params)
+
+      params = list(parameters)
+
+      # Now fill all the info for this provider
+      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
+                                  nodegroup_status=ndgrp_data,
+                                  parameters=params)
+
+      data[es_name] = info
+
+    # Prepare data in requested order
+    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+            if name in data]
+
+
+class LUExtStorageDiagnose(NoHooksLU):
+  """Logical unit for ExtStorage diagnose/query.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                               self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.eq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.eq.OldStyleQuery(self)
+
+
 class LUNodeRemove(LogicalUnit):
   """Logical unit for removing a node.
 
@@ -6332,7 +6554,7 @@ class LUInstanceActivateDisks(NoHooksLU):
 
 
 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
-                           ignore_size=False):
+                           ignore_size=False, check=True):
   """Prepare the block devices for an instance.
 
   This sets up the block devices on all nodes.
@@ -6358,7 +6580,8 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
   device_info = []
   disks_ok = True
   iname = instance.name
-  disks = _ExpandCheckDisks(instance, disks)
+  if check:
+    disks = _ExpandCheckDisks(instance, disks)
 
   # With the two passes mechanism we try to reduce the window of
   # opportunity for the race condition of switching DRBD to primary
@@ -7062,6 +7285,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # TODO: Implement support changing VG while recreating
     constants.IDISK_VG,
     constants.IDISK_METAVG,
+    constants.IDISK_PROVIDER,
     ]))
 
   def CheckArguments(self):
@@ -8493,9 +8717,9 @@ class TLMigrateInstance(Tasklet):
       self._GoReconnect(False)
       self._WaitUntilSync()
 
-    # If the instance's disk template is `rbd' and there was a successful
-    # migration, unmap the device from the source node.
-    if self.instance.disk_template == constants.DT_RBD:
+    # If the instance's disk template is `rbd' or `ext' and there was a
+    # successful migration, unmap the device from the source node.
+    if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
       disks = _ExpandCheckDisks(instance, instance.disks)
       self.feedback_fn("* unmapping instance's disks from %s" % source_node)
       for disk in disks:
@@ -8716,6 +8940,26 @@ def _GenerateUniqueNames(lu, exts):
     results.append("%s%s" % (new_id, val))
   return results
 
+
+def _GetPCIInfo(lu, dev_type):
+  """Return an (index, PCI slot) pair for a new hotpluggable device.
+
+  @type lu: L{LogicalUnit}
+  @param lu: the logical unit on whose behalf we execute
+  @type dev_type: string
+  @param dev_type: device counter to use ('disks' or 'nics')
+  @return: (index, pci slot), or (None, None) if hotplug is disabled
+
+  """
+  if lu.op.hotplug:
+    # case of InstanceCreate()
+    if hasattr(lu, 'hotplug_info'):
+      if lu.hotplug_info is not None:
+        idx = getattr(lu.hotplug_info, dev_type)
+        setattr(lu.hotplug_info, dev_type, idx + 1)
+        pci = lu.hotplug_info.pci_pool.pop()
+        lu.LogInfo("Choosing pci slot %d" % pci)
+        return idx, pci
+    # case of InstanceSetParams()
+    elif lu.instance.hotplug_info is not None:
+      idx, pci = lu.cfg.GetPCIInfo(lu.instance.name, dev_type)
+      lu.LogInfo("Choosing pci slot %d" % pci)
+      return idx, pci
+
+    lu.LogWarning("Hotplug not supported for this instance.")
+  return None, None
+
 
 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
                          iv_name, p_minor, s_minor):
@@ -8732,7 +8976,10 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
   dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
                           logical_id=(vgnames[1], names[1]),
                           params={})
-  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
+
+  disk_idx, pci = _GetPCIInfo(lu, 'disks')
+  drbd_dev = objects.Disk(idx=disk_idx, pci=pci,
+                          dev_type=constants.LD_DRBD8, size=size,
                           logical_id=(primary, secondary, port,
                                       p_minor, s_minor,
                                       shared_secret),
@@ -8744,6 +8991,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
 _DISK_TEMPLATE_NAME_PREFIX = {
   constants.DT_PLAIN: "",
   constants.DT_RBD: ".rbd",
+  constants.DT_EXT: ".ext",
   }
 
 
@@ -8753,6 +9001,7 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
   constants.DT_SHARED_FILE: constants.LD_FILE,
   constants.DT_BLOCK: constants.LD_BLOCKDEV,
   constants.DT_RBD: constants.LD_RBD,
+  constants.DT_EXT: constants.LD_EXT,
   }
 
 
@@ -8831,21 +9080,39 @@ def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
                                        disk[constants.IDISK_ADOPT])
     elif template_name == constants.DT_RBD:
       logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+    elif template_name == constants.DT_EXT:
+      def logical_id_fn(idx, _, disk):
+        provider = disk.get(constants.IDISK_PROVIDER, None)
+        if provider is None:
+          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
+                                       " not found", constants.DT_EXT,
+                                       constants.IDISK_PROVIDER)
+        return (provider, names[idx])
     else:
       raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
 
     dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
 
     for idx, disk in enumerate(disk_info):
+      params = {}
+      # Only for the ext template, add disk_info to params
+      if template_name == constants.DT_EXT:
+        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
+        for key in disk:
+          if key not in constants.IDISK_PARAMS:
+            params[key] = disk[key]
       disk_index = idx + base_index
       size = disk[constants.IDISK_SIZE]
       feedback_fn("* disk %s, size %s" %
                   (disk_index, utils.FormatUnit(size, "h")))
+
+      disk_idx, pci = _GetPCIInfo(lu, 'disks')
+
       disks.append(objects.Disk(dev_type=dev_type, size=size,
                                 logical_id=logical_id_fn(idx, disk_index, disk),
                                 iv_name="disk/%d" % disk_index,
                                 mode=disk[constants.IDISK_MODE],
-                                params={}))
+                                params=params, idx=disk_idx, pci=pci))
 
   return disks
 
@@ -9101,6 +9368,7 @@ def _ComputeDiskSize(disk_template, disks):
     constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
     constants.DT_BLOCK: 0,
     constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
+    constants.DT_EXT: sum(d[constants.IDISK_SIZE] for d in disks),
   }
 
   if disk_template not in req_size_dict:
@@ -9218,7 +9486,8 @@ class LUInstanceCreate(LogicalUnit):
     # check disks. parameter names and consistent adopt/no-adopt strategy
     has_adopt = has_no_adopt = False
     for disk in self.op.disks:
-      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
+      if self.op.disk_template != constants.DT_EXT:
+        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
       if constants.IDISK_ADOPT in disk:
         has_adopt = True
       else:
@@ -9736,6 +10005,11 @@ class LUInstanceCreate(LogicalUnit):
     if self.op.identify_defaults:
       self._RevertToDefaults(cluster)
 
+    self.hotplug_info = None
+    if self.op.hotplug:
+      self.LogInfo("Enabling hotplug.")
+      self.hotplug_info = objects.HotplugInfo(disks=0, nics=0,
+                                              pci_pool=list(range(16, 32)))
+
     # NIC buildup
     self.nics = []
     for idx, nic in enumerate(self.op.nics):
@@ -9806,8 +10080,10 @@ class LUInstanceCreate(LogicalUnit):
 
       check_params = cluster.SimpleFillNIC(nicparams)
       objects.NIC.CheckParameterSyntax(check_params)
-      self.nics.append(objects.NIC(mac=mac, ip=nic_ip,
-                                   network=net, nicparams=check_params))
+      nic_idx, pci = _GetPCIInfo(self, 'nics')
+      self.nics.append(objects.NIC(idx=nic_idx, pci=pci,
+                                   mac=mac, ip=nic_ip, network=net,
+                                   nicparams=check_params))
 
     # disk checks/pre-build
     default_vg = self.cfg.GetVGName()
@@ -9826,16 +10102,37 @@ class LUInstanceCreate(LogicalUnit):
         raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                    errors.ECODE_INVAL)
 
+      ext_provider = disk.get(constants.IDISK_PROVIDER, None)
+      if ext_provider and self.op.disk_template != constants.DT_EXT:
+        raise errors.OpPrereqError("The '%s' option is only valid for the %s"
+                                   " disk template, not %s" %
+                                   (constants.IDISK_PROVIDER, constants.DT_EXT,
+                                    self.op.disk_template),
+                                   errors.ECODE_INVAL)
+
       data_vg = disk.get(constants.IDISK_VG, default_vg)
       new_disk = {
         constants.IDISK_SIZE: size,
         constants.IDISK_MODE: mode,
         constants.IDISK_VG: data_vg,
         }
+
       if constants.IDISK_METAVG in disk:
         new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
       if constants.IDISK_ADOPT in disk:
         new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
+
+      # For extstorage, demand the `provider' option and add any
+      # additional parameters (ext-params) to the dict
+      if self.op.disk_template == constants.DT_EXT:
+        if ext_provider:
+          new_disk[constants.IDISK_PROVIDER] = ext_provider
+          for key in disk:
+            if key not in constants.IDISK_PARAMS:
+              new_disk[key] = disk[key]
+        else:
+          raise errors.OpPrereqError("Missing provider for template '%s'" %
+                                     constants.DT_EXT, errors.ECODE_INVAL)
+
       self.disks.append(new_disk)
 
     if self.op.mode == constants.INSTANCE_IMPORT:
@@ -9877,7 +10174,7 @@ class LUInstanceCreate(LogicalUnit):
     # creation job will fail.
     for nic in self.nics:
       if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
+        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
 
     #### allocator run
 
@@ -9994,6 +10291,9 @@ class LUInstanceCreate(LogicalUnit):
         # Any function that checks prerequisites can be placed here.
         # Check if there is enough space on the RADOS cluster.
         _CheckRADOSFreeSpace()
+      elif self.op.disk_template == constants.DT_EXT:
+        # FIXME: Function that checks prereqs if needed
+        pass
       else:
         # Check lv size requirements, if not adopting
         req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
@@ -10077,6 +10377,9 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
+    #TODO: _CheckExtParams (remotely)
+    # Check parameters for extstorage
+
     # memory check on primary node
     #TODO(dynmem): use MINMEM for checking
     if self.op.start:
@@ -10130,6 +10433,7 @@ class LUInstanceCreate(LogicalUnit):
                             hvparams=self.op.hvparams,
                             hypervisor=self.op.hypervisor,
                             osparams=self.op.osparams,
+                            hotplug_info=self.hotplug_info,
                             )
 
     if self.op.tags:
@@ -11706,7 +12010,8 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     if instance.disk_template not in (constants.DT_FILE,
                                       constants.DT_SHARED_FILE,
-                                      constants.DT_RBD):
+                                      constants.DT_RBD,
+                                      constants.DT_EXT):
       # TODO: check the free disk space for file, when that feature will be
       # supported
       _CheckNodesFreeDiskPerVG(self, nodenames,
@@ -12095,13 +12400,16 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
         if remove_fn is not None:
           remove_fn(absidx, item, private)
 
+        #TODO: include a hotplugged msg in changes
         changes = [("%s/%s" % (kind, absidx), "remove")]
 
         assert container[absidx] == item
         del container[absidx]
       elif op == constants.DDM_MODIFY:
         if modify_fn is not None:
+          #TODO: include a hotplugged msg in changes
           changes = modify_fn(absidx, item, params, private)
       else:
         raise errors.ProgrammerError("Unhandled operation '%s'" % op)
 
@@ -12175,7 +12483,10 @@ class LUInstanceSetParams(LogicalUnit):
     for (op, _, params) in mods:
       assert ht.TDict(params)
 
-      utils.ForceDictType(params, key_types)
+      # If key_types is an empty dict, we assume we have an 'ext' template
+      # and thus do not ForceDictType
+      if key_types:
+        utils.ForceDictType(params, key_types)
 
       if op == constants.DDM_REMOVE:
         if params:
@@ -12211,9 +12522,18 @@ class LUInstanceSetParams(LogicalUnit):
 
       params[constants.IDISK_SIZE] = size
 
-    elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
-      raise errors.OpPrereqError("Disk size change not possible, use"
-                                 " grow-disk", errors.ECODE_INVAL)
+    elif op == constants.DDM_MODIFY:
+      if constants.IDISK_SIZE in params:
+        raise errors.OpPrereqError("Disk size change not possible, use"
+                                   " grow-disk", errors.ECODE_INVAL)
+      if constants.IDISK_MODE not in params:
+        raise errors.OpPrereqError("Disk 'mode' is the only kind of"
+                                   " modification supported, but missing",
+                                   errors.ECODE_NOENT)
+      if len(params) > 1:
+        raise errors.OpPrereqError("Disk modification doesn't support"
+                                   " additional arbitrary parameters",
+                                   errors.ECODE_INVAL)
 
   @staticmethod
   def _VerifyNicModification(op, params):
@@ -12272,16 +12592,26 @@ class LUInstanceSetParams(LogicalUnit):
     if self.op.hvparams:
       _CheckGlobalHvParams(self.op.hvparams)
 
-    self.op.disks = \
-      self._UpgradeDiskNicMods("disk", self.op.disks,
-        opcodes.OpInstanceSetParams.TestDiskModifications)
+    if self.op.allow_arbit_params:
+      self.op.disks = \
+        self._UpgradeDiskNicMods("disk", self.op.disks,
+          opcodes.OpInstanceSetParams.TestExtDiskModifications)
+    else:
+      self.op.disks = \
+        self._UpgradeDiskNicMods("disk", self.op.disks,
+          opcodes.OpInstanceSetParams.TestDiskModifications)
+
     self.op.nics = \
       self._UpgradeDiskNicMods("NIC", self.op.nics,
         opcodes.OpInstanceSetParams.TestNicModifications)
 
     # Check disk modifications
-    self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
-                    self._VerifyDiskModification)
+    if self.op.allow_arbit_params:
+      self._CheckMods("disk", self.op.disks, {},
+                      self._VerifyDiskModification)
+    else:
+      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
+                      self._VerifyDiskModification)
 
     if self.op.disks and self.op.disk_template is not None:
       raise errors.OpPrereqError("Disk template conversion and other disk"
@@ -12339,10 +12669,10 @@ class LUInstanceSetParams(LogicalUnit):
       nics = []
 
       for nic in self._new_nics:
-        nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
-        mode = nicparams[constants.NIC_MODE]
-        link = nicparams[constants.NIC_LINK]
-        nics.append((nic.ip, nic.mac, mode, link, nic.network))
+        n = copy.deepcopy(nic)
+        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
+        n.nicparams = nicparams
+        nics.append(_NICToTuple(self, n))
 
       args["nics"] = nics
 
@@ -12412,7 +12742,7 @@ class LUInstanceSetParams(LogicalUnit):
       elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
         # otherwise generate the MAC address
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
       else:
         # or validate/reserve the current one
         try:
@@ -12433,7 +12763,7 @@ class LUInstanceSetParams(LogicalUnit):
       old_prefix = get_net_prefix(old_net)
       if old_prefix != new_prefix:
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
 
     #if there is a change in nic-network configuration
     new_ip = params.get(constants.INIC_IP, old_ip)
@@ -12480,6 +12810,8 @@ class LUInstanceSetParams(LogicalUnit):
                                  " a NIC that is connected to a network.",
                                  errors.ECODE_INVAL)
 
+    logging.info("new_params %s", new_params)
+    logging.info("new_filled_params %s", new_filled_params)
     private.params = new_params
     private.filled = new_filled_params
 
@@ -12503,6 +12835,31 @@ class LUInstanceSetParams(LogicalUnit):
     # Prepare disk/NIC modifications
     self.diskmod = PrepareContainerMods(self.op.disks, None)
     self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+    logging.info("nicmod %s", self.nicmod)
+
+    # Check the validity of the `provider' parameter
+    if instance.disk_template == constants.DT_EXT:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if mod[0] == constants.DDM_ADD:
+          if ext_provider is None:
+            raise errors.OpPrereqError("Instance template is '%s' and parameter"
+                                       " '%s' missing, during disk add" %
+                                       (constants.DT_EXT,
+                                        constants.IDISK_PROVIDER),
+                                       errors.ECODE_NOENT)
+        elif mod[0] == constants.DDM_MODIFY:
+          if ext_provider:
+            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
+                                       " modification" % constants.IDISK_PROVIDER,
+                                       errors.ECODE_INVAL)
+    else:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if ext_provider is not None:
+          raise errors.OpPrereqError("Parameter '%s' is only valid for instances"
+                                     " of type '%s'" % (constants.IDISK_PROVIDER,
+                                      constants.DT_EXT), errors.ECODE_INVAL)
 
     # OS change
     if self.op.os_name and not self.op.force:
@@ -12741,9 +13098,11 @@ class LUInstanceSetParams(LogicalUnit):
                                  " (%d), cannot add more" % constants.MAX_NICS,
                                  errors.ECODE_STATE)
 
     # Verify disk changes (operating on a copy)
     disks = instance.disks[:]
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
+    ApplyContainerMods("disk", disks, None, self.diskmod,
+                       None, None, None)
     if len(disks) > constants.MAX_DISKS:
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
@@ -12762,11 +13121,13 @@ class LUInstanceSetParams(LogicalUnit):
       # Operate on copies as this is still in prereq
       nics = [nic.Copy() for nic in instance.nics]
       ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
-                         self._CreateNewNic, self._ApplyNicMods, None)
+                         self._CreateNewNic, self._ApplyNicMods,
+                         self._RemoveNic)
       self._new_nics = nics
     else:
       self._new_nics = None
 
   def _ConvertPlainToDrbd(self, feedback_fn):
     """Converts an instance from plain to drbd.
 
@@ -12914,6 +13275,13 @@ class LUInstanceSetParams(LogicalUnit):
         self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
                         disk.iv_name, disk, node, err)
 
+    if self.op.hotplug and disk.pci:
+      self.LogInfo("Trying to hotplug device.")
+      disk_ok, device_info = _AssembleInstanceDisks(self, self.instance,
+                                                    [disk], check=False)
+      _, _, dev_path = device_info[0]
+      result = self.rpc.call_hot_add_disk(self.instance.primary_node,
+                                          self.instance, disk, dev_path, idx)
     return (disk, [
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
       ])
@@ -12933,6 +13301,20 @@ class LUInstanceSetParams(LogicalUnit):
     """Removes a disk.
 
     """
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    if root.pci and not self.op.hotplug:
+      raise errors.OpPrereqError("Cannot remove a disk that has"
+                                 " been hotplugged"
+                                 " without removing it with hotplug",
+                                 errors.ECODE_INVAL)
+    if self.op.hotplug and root.pci:
+      self.LogInfo("Trying to hotplug device.")
+      self.rpc.call_hot_del_disk(self.instance.primary_node,
+                                 self.instance, root, idx)
+      _ShutdownInstanceDisks(self, self.instance, [root])
+      self.cfg.UpdatePCIInfo(self.instance.name, root.pci)
+
     (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
     for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
       self.cfg.SetDiskID(disk, node)
@@ -12945,8 +13327,7 @@ class LUInstanceSetParams(LogicalUnit):
     if root.dev_type in constants.LDS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
-  @staticmethod
-  def _CreateNewNic(idx, params, private):
+  def _CreateNewNic(self, idx, params, private):
     """Creates data structure for a new network interface.
 
     """
@@ -12956,16 +13337,28 @@ class LUInstanceSetParams(LogicalUnit):
     #TODO: not private.filled?? can a nic have no nicparams??
     nicparams = private.filled
 
-    return (objects.NIC(mac=mac, ip=ip, network=network, nicparams=nicparams), [
+    nic = objects.NIC(mac=mac, ip=ip, network=network, nicparams=nicparams)
+
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    #      return changes
+    if self.op.hotplug:
+      nic_idx, pci = _GetPCIInfo(self, 'nics')
+      if pci is not None:
+        nic.idx = nic_idx
+        nic.pci = pci
+        result = self.rpc.call_hot_add_nic(self.instance.primary_node,
+                                           self.instance, nic, idx)
+    desc = [
       ("nic.%d" % idx,
        "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
        (mac, ip, private.filled[constants.NIC_MODE],
        private.filled[constants.NIC_LINK],
        network)),
-      ])
+      ]
+    return (nic, desc)
 
-  @staticmethod
-  def _ApplyNicMods(idx, nic, params, private):
+  def _ApplyNicMods(self, idx, nic, params, private):
     """Modifies a network interface.
 
     """
@@ -12982,8 +13375,30 @@ class LUInstanceSetParams(LogicalUnit):
       for (key, val) in nic.nicparams.items():
         changes.append(("nic.%s/%d" % (key, idx), val))
 
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    if self.op.hotplug and nic.pci:
+      self.LogInfo("Trying to hotplug device.")
+      self.rpc.call_hot_del_nic(self.instance.primary_node,
+                                self.instance, nic, idx)
+      result = self.rpc.call_hot_add_nic(self.instance.primary_node,
+                                         self.instance, nic, idx)
     return changes
 
+  def _RemoveNic(self, idx, nic, private):
+    """Removes a NIC.
+
+    """
+    if nic.pci and not self.op.hotplug:
+      raise errors.OpPrereqError("Cannot remove a NIC that was hotplugged"
+                                 " without using hotplug for the removal"
+                                 " as well", errors.ECODE_INVAL)
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    if self.op.hotplug and nic.pci:
+      self.LogInfo("Trying to hotplug device.")
+      self.rpc.call_hot_del_nic(self.instance.primary_node,
+                                self.instance, nic, idx)
+      self.cfg.UpdatePCIInfo(self.instance.name, nic.pci)
+
   def Exec(self, feedback_fn):
     """Modifies an instance.
 
@@ -14518,6 +14933,10 @@ class TagsLU(NoHooksLU): # pylint: disable=W0223
       self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
       lock_level = locking.LEVEL_NODEGROUP
       lock_name = self.group_uuid
+    elif self.op.kind == constants.TAG_NETWORK:
+      self.network_uuid = self.cfg.LookupNetwork(self.op.name)
+      lock_level = locking.LEVEL_NETWORK
+      lock_name = self.network_uuid
     else:
       lock_level = None
       lock_name = None
@@ -14540,6 +14959,8 @@ class TagsLU(NoHooksLU): # pylint: disable=W0223
       self.target = self.cfg.GetInstanceInfo(self.op.name)
     elif self.op.kind == constants.TAG_NODEGROUP:
       self.target = self.cfg.GetNodeGroup(self.group_uuid)
+    elif self.op.kind == constants.TAG_NETWORK:
+      self.target = self.cfg.GetNetwork(self.network_uuid)
     else:
       raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                  str(self.op.kind), errors.ECODE_INVAL)
@@ -15507,21 +15928,29 @@ class LUNetworkAdd(LogicalUnit):
       raise errors.OpPrereqError("Network '%s' already defined" %
                                  self.op.network, errors.ECODE_EXISTS)
 
+    if self.op.mac_prefix:
+      utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
+
+    # Check tag validity
+    for tag in self.op.tags:
+      objects.TaggableObject.ValidateTag(tag)
 
   def BuildHooksEnv(self):
     """Build hooks env.
 
     """
-    env = {
-      "NETWORK_NAME": self.op.network_name,
-      "NETWORK_SUBNET": self.op.network,
-      "NETWORK_GATEWAY": self.op.gateway,
-      "NETWORK_SUBNET6": self.op.network6,
-      "NETWORK_GATEWAY6": self.op.gateway6,
-      "NETWORK_MAC_PREFIX": self.op.mac_prefix,
-      "NETWORK_TYPE": self.op.network_type,
+    args = {
+      "name": self.op.network_name,
+      "network": self.op.network,
+      "gateway": self.op.gateway,
+      "network6": self.op.network6,
+      "gateway6": self.op.gateway6,
+      "mac_prefix": self.op.mac_prefix,
+      "network_type": self.op.network_type,
+      "tags": self.op.tags,
       }
-    return env
+    return _BuildNetworkHookEnv(**args)
 
   def Exec(self, feedback_fn):
     """Add the ip pool to the cluster.
@@ -15568,6 +15997,10 @@ class LUNetworkAdd(LogicalUnit):
         except errors.AddressPoolError, e:
           raise errors.OpExecError("Cannot reserve IP %s. %s " % (ip, e))
 
+    if self.op.tags:
+      for tag in self.op.tags:
+        nobj.AddTag(tag)
+
     self.cfg.AddNetwork(nobj, self.proc.GetECId(), check_uuid=False)
     del self.remove_locks[locking.LEVEL_NETWORK]
 
@@ -15673,6 +16106,7 @@ class LUNetworkSetParams(LogicalUnit):
     self.mac_prefix = self.network.mac_prefix
     self.network6 = self.network.network6
     self.gateway6 = self.network.gateway6
+    self.tags = self.network.tags
 
     self.pool = network.AddressPool(self.network)
 
@@ -15695,6 +16129,7 @@ class LUNetworkSetParams(LogicalUnit):
       if self.op.mac_prefix == constants.VALUE_NONE:
         self.mac_prefix = None
       else:
+        utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
         self.mac_prefix = self.op.mac_prefix
 
     if self.op.gateway6:
@@ -15715,16 +16150,17 @@ class LUNetworkSetParams(LogicalUnit):
     """Build hooks env.
 
     """
-    env = {
-      "NETWORK_NAME": self.op.network_name,
-      "NETWORK_SUBNET": self.network.network,
-      "NETWORK_GATEWAY": self.gateway,
-      "NETWORK_SUBNET6": self.network6,
-      "NETWORK_GATEWAY6": self.gateway6,
-      "NETWORK_MAC_PREFIX": self.mac_prefix,
-      "NETWORK_TYPE": self.network_type,
+    args = {
+      "name": self.op.network_name,
+      "network": self.network.network,
+      "gateway": self.gateway,
+      "network6": self.network6,
+      "gateway6": self.gateway6,
+      "mac_prefix": self.mac_prefix,
+      "network_type": self.network_type,
+      "tags": self.tags,
       }
-    return env
+    return _BuildNetworkHookEnv(**args)
 
   def BuildHooksNodes(self):
     """Build hooks nodes.
@@ -15943,9 +16379,9 @@ class LUNetworkConnect(LogicalUnit):
   def BuildHooksEnv(self):
     ret = dict()
     ret["GROUP_NAME"] = self.group_name
-    ret["GROUP_NETWORK_NAME"] = self.network_name
     ret["GROUP_NETWORK_MODE"] = self.network_mode
     ret["GROUP_NETWORK_LINK"] = self.network_link
+    ret.update(_BuildNetworkHookEnvByObject(self, self.network))
     return ret
 
   def BuildHooksNodes(self):
@@ -16037,7 +16473,7 @@ class LUNetworkDisconnect(LogicalUnit):
   def BuildHooksEnv(self):
     ret = dict()
     ret["GROUP_NAME"] = self.group_name
-    ret["GROUP_NETWORK_NAME"] = self.network_name
+    ret.update(_BuildNetworkHookEnvByObject(self, self.network))
     return ret
 
   def BuildHooksNodes(self):
@@ -16092,6 +16528,7 @@ _QUERY_IMPL = {
   constants.QR_GROUP: _GroupQuery,
   constants.QR_NETWORK: _NetworkQuery,
   constants.QR_OS: _OsQuery,
+  constants.QR_EXTSTORAGE: _ExtStorageQuery,
   constants.QR_EXPORT: _ExportQuery,
   }