Merge remote branch 'google/stable-2.6' into stable-2.6-ippool-hotplug-esi
author    Dimitris Aragiorgis <dimara@grnet.gr>
          Wed, 12 Sep 2012 13:21:02 +0000 (16:21 +0300)
committer Dimitris Aragiorgis <dimara@grnet.gr>
          Wed, 12 Sep 2012 13:21:02 +0000 (16:21 +0300)
Conflicts:
lib/rpc.py

Signed-off-by: Dimitris Aragiorgis <dimara@grnet.gr>

43 files changed:
.gitignore
Makefile.am
autotools/build-bash-completion
configure.ac
doc/design-shared-storage.rst
htools/Ganeti/HTools/Cluster.hs
htools/Ganeti/HTools/Instance.hs
htools/Ganeti/HTools/Types.hs
htools/Ganeti/Objects.hs
lib/backend.py
lib/bdev.py
lib/bootstrap.py
lib/cli.py
lib/client/gnt_cluster.py
lib/client/gnt_instance.py
lib/client/gnt_network.py [new file with mode: 0644]
lib/client/gnt_storage.py [new file with mode: 0644]
lib/cmdlib.py
lib/config.py
lib/constants.py
lib/errors.py
lib/hypervisor/hv_kvm.py
lib/locking.py
lib/luxi.py
lib/network.py [new file with mode: 0644]
lib/objects.py
lib/opcodes.py
lib/ovf.py
lib/query.py
lib/rapi/client.py
lib/rapi/connector.py
lib/rapi/rlib2.py
lib/rpc.py
lib/rpc_defs.py
lib/server/masterd.py
lib/server/noded.py
lib/ssconf.py
man/footer.rst
man/ganeti-extstorage-interface.rst [new file with mode: 0644]
man/gnt-network.rst [new file with mode: 0644]
man/gnt-storage.rst [new file with mode: 0644]
tools/burnin
tools/cfgupgrade

index 8631a91..fe31650 100644 (file)
 /scripts/gnt-job
 /scripts/gnt-node
 /scripts/gnt-os
+/scripts/gnt-storage
 
 # htools-specific rules
 /htools/apidoc
index f7b2a51..b3a4cf1 100644 (file)
@@ -233,6 +233,7 @@ pkgpython_PYTHON = \
        lib/ssh.py \
        lib/storage.py \
        lib/uidpool.py \
+       lib/network.py \
        lib/workerpool.py
 
 client_PYTHON = \
@@ -244,7 +245,9 @@ client_PYTHON = \
        lib/client/gnt_instance.py \
        lib/client/gnt_job.py \
        lib/client/gnt_node.py \
-       lib/client/gnt_os.py
+       lib/client/gnt_network.py \
+       lib/client/gnt_os.py \
+       lib/client/gnt_storage.py
 
 hypervisor_PYTHON = \
        lib/hypervisor/__init__.py \
@@ -482,8 +485,10 @@ gnt_scripts = \
        scripts/gnt-group \
        scripts/gnt-instance \
        scripts/gnt-job \
+       scripts/gnt-network \
        scripts/gnt-node \
-       scripts/gnt-os
+       scripts/gnt-os \
+       scripts/gnt-storage
 
 PYTHON_BOOTSTRAP_SBIN = \
        daemons/ganeti-masterd \
@@ -685,16 +690,19 @@ man_MANS = \
        man/ganeti-masterd.8 \
        man/ganeti-noded.8 \
        man/ganeti-os-interface.7 \
+       man/ganeti-extstorage-interface.7 \
        man/ganeti-rapi.8 \
        man/ganeti-watcher.8 \
        man/gnt-backup.8 \
        man/gnt-cluster.8 \
        man/gnt-debug.8 \
        man/gnt-group.8 \
+       man/gnt-network.8 \
        man/gnt-instance.8 \
        man/gnt-job.8 \
        man/gnt-node.8 \
        man/gnt-os.8 \
+       man/gnt-storage.8 \
        man/hail.1 \
        man/hbal.1 \
        man/hcheck.1 \
@@ -1112,6 +1120,7 @@ lib/_autoconf.py: Makefile | lib/.dir
          echo "SSH_CONFIG_DIR = '$(SSH_CONFIG_DIR)'"; \
          echo "EXPORT_DIR = '$(EXPORT_DIR)'"; \
          echo "OS_SEARCH_PATH = [$(OS_SEARCH_PATH)]"; \
+         echo "ES_SEARCH_PATH = [$(ES_SEARCH_PATH)]"; \
          echo "XEN_BOOTLOADER = '$(XEN_BOOTLOADER)'"; \
          echo "XEN_KERNEL = '$(XEN_KERNEL)'"; \
          echo "XEN_INITRD = '$(XEN_INITRD)'"; \
index 365dad9..b520aa4 100755 (executable)
@@ -131,6 +131,15 @@ def WritePreamble(sw):
     sw.DecIndent()
   sw.Write("}")
 
+  sw.Write("_ganeti_network() {")
+  sw.IncIndent()
+  try:
+    networks_path = os.path.join(constants.DATA_DIR, "ssconf_networks")
+    sw.Write("cat %s 2>/dev/null || :", utils.ShellQuote(networks_path))
+  finally:
+    sw.DecIndent()
+  sw.Write("}")
+
   # Params: <offset> <options with values> <options without values>
   # Result variable: $first_arg_idx
   sw.Write("_ganeti_find_first_arg() {")
@@ -341,10 +350,14 @@ class CompletionWriter:
           WriteCompReply(sw, "-W \"$(_ganeti_instances)\"", cur=cur)
         elif suggest == cli.OPT_COMPL_ONE_OS:
           WriteCompReply(sw, "-W \"$(_ganeti_os)\"", cur=cur)
+        elif suggest == cli.OPT_COMPL_ONE_EXTSTORAGE:
+          WriteCompReply(sw, "-W \"$(_ganeti_extstorage)\"", cur=cur)
         elif suggest == cli.OPT_COMPL_ONE_IALLOCATOR:
           WriteCompReply(sw, "-W \"$(_ganeti_iallocator)\"", cur=cur)
         elif suggest == cli.OPT_COMPL_ONE_NODEGROUP:
           WriteCompReply(sw, "-W \"$(_ganeti_nodegroup)\"", cur=cur)
+        elif suggest == cli.OPT_COMPL_ONE_NETWORK:
+          WriteCompReply(sw, "-W \"$(_ganeti_network)\"", cur=cur)
         elif suggest == cli.OPT_COMPL_INST_ADD_NODES:
           sw.Write("local tmp= node1= pfx= curvalue=\"${optcur#*:}\"")
 
@@ -442,10 +455,14 @@ class CompletionWriter:
           choices = "$(_ganeti_nodes)"
         elif isinstance(arg, cli.ArgGroup):
           choices = "$(_ganeti_nodegroup)"
+        elif isinstance(arg, cli.ArgNetwork):
+          choices = "$(_ganeti_network)"
         elif isinstance(arg, cli.ArgJobId):
           choices = "$(_ganeti_jobs)"
         elif isinstance(arg, cli.ArgOs):
           choices = "$(_ganeti_os)"
+        elif isinstance(arg, cli.ArgExtStorage):
+          choices = "$(_ganeti_extstorage)"
         elif isinstance(arg, cli.ArgFile):
           choices = ""
           compgenargs.append("-f")
index ebc00b3..b3f31f8 100644 (file)
@@ -60,6 +60,18 @@ AC_ARG_WITH([os-search-path],
   [os_search_path="'/srv/ganeti/os'"])
 AC_SUBST(OS_SEARCH_PATH, $os_search_path)
 
+# --with-extstorage-search-path=...
+# same black sed magic for quoting of the strings in the list
+AC_ARG_WITH([extstorage-search-path],
+  [AS_HELP_STRING([--with-extstorage-search-path=LIST],
+    [comma separated list of directories to]
+    [ search for External Storage Providers]
+    [ (default is /srv/ganeti/extstorage)]
+  )],
+  [es_search_path=`echo -n "$withval" | sed -e "s/\([[^,]]*\)/'\1'/g"`],
+  [es_search_path="'/srv/ganeti/extstorage'"])
+AC_SUBST(ES_SEARCH_PATH, $es_search_path)
+
 # --with-iallocator-search-path=...
 # do a bit of black sed magic to for quoting of the strings in the list
 AC_ARG_WITH([iallocator-search-path],
index c175476..7080182 100644 (file)
@@ -64,15 +64,11 @@ The design addresses the following procedures:
   filesystems.
 - Introduction of shared block device disk template with device
   adoption.
+- Introduction of an External Storage Interface.
 
 Additionally, mid- to long-term goals include:
 
 - Support for external “storage pools”.
-- Introduction of an interface for communicating with external scripts,
-  providing methods for the various stages of a block device's and
-  instance's life-cycle. In order to provide storage provisioning
-  capabilities for various SAN appliances, external helpers in the form
-  of a “storage driver” will be possibly introduced as well.
 
 Refactoring of all code referring to constants.DTS_NET_MIRROR
 =============================================================
@@ -159,6 +155,104 @@ The shared block device template will make the following assumptions:
 - The device will be available with the same path under all nodes in the
   node group.
 
+Introduction of an External Storage Interface
+=============================================
+Overview
+--------
+
+To extend the shared block storage template and give Ganeti the ability
+to control and manipulate external storage (provisioning, removal,
+growing, etc.), we need a more generic approach. The generic method for
+supporting external shared storage in Ganeti will be to have an
+ExtStorage provider for each external shared storage hardware type. The
+ExtStorage provider will be a set of files (executable scripts and text
+files) contained inside a directory which will be named after the
+provider. This directory must be present on all nodes of a nodegroup
+(Ganeti doesn't replicate it) in order for the provider to be usable
+(valid) by Ganeti for this nodegroup. The external shared storage
+hardware should also be accessible from all nodes of this nodegroup.
+
+An “ExtStorage provider” will have to provide the following methods:
+
+- Create a disk
+- Remove a disk
+- Grow a disk
+- Attach a disk to a given node
+- Detach a disk from a given node
+- Verify its supported parameters
+
+The proposed ExtStorage interface borrows heavily from the OS
+interface and follows a one-script-per-function approach. An ExtStorage
+provider is expected to provide the following scripts:
+
+- `create`
+- `remove`
+- `grow`
+- `attach`
+- `detach`
+- `verify`
+
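+For illustration, a valid provider directory could look like the
+following (a sketch; the exact name of the parameters file is an
+assumption here, mirroring the OS interface's layout)::
+
+ /srv/ganeti/extstorage/sample_provider1/
+   create
+   remove
+   grow
+   attach
+   detach
+   verify
+   parameters.list
+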
+All scripts will be called with no arguments and will get their input
+via environment variables. A common set of variables will be exported
+for all commands, and some commands may get extra, command-specific
+variables.
+
+- `VOL_NAME`: The name of the volume. This name is unique inside the
+  Ganeti cluster, and Ganeti uses it to refer to a specific volume
+  inside the external storage.
+- `VOL_SIZE`: The volume's size in mebibytes.
+- `VOL_NEW_SIZE`: Available only to the `grow` script. It declares the
+  new size of the volume after grow (in mebibytes).
+- `EXTP_name`: ExtStorage parameter, where `name` is the parameter in
+  upper-case (same as OS interface's `OSP_*` parameters).
+
+All scripts except `attach` should return 0 on success and non-zero on
+error, accompanied by an appropriate error message on stderr. On
+success, the `attach` script should print a single string on stdout:
+the block device's full path after it has been successfully attached
+to the host node. On error it should return non-zero.
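+
+As a minimal sketch (a hypothetical provider backed by a directory of
+image files shared across all nodes; the backing path is an assumption,
+not part of this design), a `create` script could look like::
+
+ #!/usr/bin/env python
+ # Hypothetical ExtStorage `create' script: allocate a sparse file
+ # named after the volume inside a shared directory.
+ import os
+ import sys
+
+ BACKING_DIR = "/srv/shared-volumes"  # assumed shared mount point
+
+ def main():
+   name = os.environ["VOL_NAME"]            # unique volume name
+   size_mib = int(os.environ["VOL_SIZE"])   # requested size in MiB
+   path = os.path.join(BACKING_DIR, name)
+   try:
+     with open(path, "w") as vol_file:
+       vol_file.truncate(size_mib * 1024 * 1024)  # sparse allocation
+   except EnvironmentError, err:
+     sys.stderr.write("create failed: %s\n" % err)
+     return 1
+   return 0
+
+ if __name__ == "__main__":
+   sys.exit(main())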
+
+Implementation
+--------------
+
+To support the ExtStorage interface, we will introduce a new disk
+template called `ext`. This template will implement the existing Ganeti
+disk interface in `lib/bdev.py` (create, remove, attach, assemble,
+shutdown, grow), and will simultaneously pass control to the external
+scripts to actually handle the above actions. The `ext` disk template
+will act as a translation layer between the current Ganeti disk
+interface and the ExtStorage providers.
+
+We will also introduce a new IDISK_PARAM called `IDISK_PROVIDER =
+provider`, which will be used at the command line to select the desired
+ExtStorage provider. This parameter will be valid only for template
+`ext`, e.g.::
+
+ gnt-instance add -t ext --disk=0:size=2G,provider=sample_provider1
+
+The ExtStorage interface will allow different disks of an instance to
+be created by different providers, e.g.::
+
+ gnt-instance add -t ext --disk=0:size=2G,provider=sample_provider1
+                         --disk=1:size=1G,provider=sample_provider2
+                         --disk=2:size=3G,provider=sample_provider1
+
+Finally, the ExtStorage interface will support passing of parameters to
+the ExtStorage provider. This will also be done per disk, from the
+command line::
+
+ gnt-instance add -t ext --disk=0:size=1G,provider=sample_provider1,
+                                  param1=value1,param2=value2
+
+The above parameters will be exported to the ExtStorage provider's
+scripts as the environment variables:
+
+- `EXTP_PARAM1 = str(value1)`
+- `EXTP_PARAM2 = str(value2)`
+
+We will also introduce a new Ganeti client called `gnt-storage` which
+will be used to diagnose ExtStorage providers and show information about
+them, similarly to the way `gnt-os diagnose` and `gnt-os info` handle
+OS definitions.
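+
+For example (a sketch; the exact subcommand names follow the `gnt-os`
+analogy and are an assumption here)::
+
+ gnt-storage diagnose
+ gnt-storage info sample_provider1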
+
 Long-term shared storage goals
 ==============================
 Storage pool handling
@@ -166,7 +260,7 @@ Storage pool handling
 
 A new cluster configuration attribute will be introduced, named
 “storage_pools”, modeled as a dictionary mapping storage pools to
-external storage drivers (see below), e.g.::
+external storage providers (see below), e.g.::
 
  {
   "nas1": "foostore",
@@ -188,93 +282,19 @@ Furthermore, the storage pools will be used to indicate the availability
 of storage pools to different node groups, thus specifying the
 instances' “mobility domain”.
 
-New disk templates will also be necessary to facilitate the use of external
-storage. The proposed addition is a whole template namespace created by
-prefixing the pool names with a fixed string, e.g. “ext:”, forming names
-like “ext:nas1”, “ext:foo”.
-
-Interface to the external storage drivers
------------------------------------------
-
-In addition to external storage pools, a new interface will be
-introduced to allow external scripts to provision and manipulate shared
-storage.
-
-In order to provide storage provisioning and manipulation (e.g. growing,
-renaming) capabilities, each instance's disk template can possibly be
-associated with an external “storage driver” which, based on the
-instance's configuration and tags, will perform all supported storage
-operations using auxiliary means (e.g. XML-RPC, ssh, etc.).
-
-A “storage driver” will have to provide the following methods:
-
-- Create a disk
-- Remove a disk
-- Rename a disk
-- Resize a disk
-- Attach a disk to a given node
-- Detach a disk from a given node
-
-The proposed storage driver architecture borrows heavily from the OS
-interface and follows a one-script-per-function approach. A storage
-driver is expected to provide the following scripts:
-
-- `create`
-- `resize`
-- `rename`
-- `remove`
-- `attach`
-- `detach`
-
-These executables will be called once for each disk with no arguments
-and all required information will be passed through environment
-variables. The following environment variables will always be present on
-each invocation:
-
-- `INSTANCE_NAME`: The instance's name
-- `INSTANCE_UUID`: The instance's UUID
-- `INSTANCE_TAGS`: The instance's tags
-- `DISK_INDEX`: The current disk index.
-- `LOGICAL_ID`: The disk's logical id (if existing)
-- `POOL`: The storage pool the instance belongs to.
-
-Additional variables may be available in a per-script context (see
-below).
-
-Of particular importance is the disk's logical ID, which will act as
-glue between Ganeti and the external storage drivers; there are two
-possible ways of using a disk's logical ID in a storage driver:
-
-1. Simply use it as a unique identifier (e.g. UUID) and keep a separate,
-   external database linking it to the actual storage.
-2. Encode all useful storage information in the logical ID and have the
-   driver decode it at runtime.
-
-All scripts should return 0 on success and non-zero on error accompanied by
-an appropriate error message on stderr. Furthermore, the following
-special cases are defined:
-
-1. `create` In case of success, a string representing the disk's logical
-   id must be returned on stdout, which will be saved in the instance's
-   configuration and can be later used by the other scripts of the same
-   storage driver. The logical id may be based on instance name,
-   instance uuid and/or disk index.
-
-   Additional environment variables present:
-     - `DISK_SIZE`: The requested disk size in MiB
-
-2. `resize` In case of success, output the new disk size.
-
-   Additional environment variables present:
-     - `DISK_SIZE`: The requested disk size in MiB
-
-3. `rename` On success, a new logical id should be returned, which will
-   replace the old one. This script is meant to rename the instance's
-   backing store and update the disk's logical ID in case one of them is
-   bound to the instance name.
+The pool in which to put the new instance's disk will be specified on
+the command line during `instance add`. This will become possible by
+replacing the IDISK_PROVIDER parameter with a new one, called
+`IDISK_POOL = pool`. The cmdlib logic will then look at the
+cluster-level mapping dictionary to determine the ExtStorage provider
+for the given pool.
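+
+A hypothetical invocation under this scheme (syntax sketch only, with
+the pool name taken from the example mapping above) would be::
+
+ gnt-instance add -t ext --disk=0:size=2G,pool=nas1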
 
-   Additional environment variables present:
-     - `NEW_INSTANCE_NAME`: The instance's new name.
+gnt-storage
+-----------
 
+The `gnt-storage` client can be extended to support pool management
+(creation/modification/deletion of pools, connection/disconnection of
+pools to nodegroups, etc.). It can also be extended to diagnose and
+provide information for internal disk templates too, such as lvm and
+drbd.
 
 .. vim: set textwidth=72 :
index 0ccf3bd..ece8415 100644 (file)
@@ -902,6 +902,12 @@ nodeEvacInstance nl il mode inst@(Instance.Instance
                    failOnSecondaryChange mode dt >>
                    evacOneNodeOnly nl il inst gdx avail_nodes
 
+nodeEvacInstance nl il mode inst@(Instance.Instance
+                                  {Instance.diskTemplate = dt@DTExt})
+                 gdx avail_nodes =
+                   failOnSecondaryChange mode dt >>
+                   evacOneNodeOnly nl il inst gdx avail_nodes
+
 nodeEvacInstance nl il ChangePrimary
                  inst@(Instance.Instance {Instance.diskTemplate = DTDrbd8})
                  _ _ =
index 76465ce..44d0891 100644 (file)
@@ -144,6 +144,7 @@ movableDiskTemplates =
   , T.DTBlock
   , T.DTSharedFile
   , T.DTRbd
+  , T.DTExt
   ]
 
 -- | A simple name for the int, instance association list.
index b6e92b7..14ff195 100644 (file)
@@ -123,6 +123,7 @@ $(THH.declareSADT "DiskTemplate"
        , ("DTBlock",      'C.dtBlock)
        , ("DTDrbd8",      'C.dtDrbd8)
        , ("DTRbd",        'C.dtRbd)
+       , ("DTExt",        'C.dtExt)
        ])
 $(THH.makeJSONInstance ''DiskTemplate)
 
@@ -141,6 +142,7 @@ templateMirrorType DTPlain      = MirrorNone
 templateMirrorType DTBlock      = MirrorExternal
 templateMirrorType DTDrbd8      = MirrorInternal
 templateMirrorType DTRbd        = MirrorExternal
+templateMirrorType DTExt        = MirrorExternal
 
 -- | The Group allocation policy type.
 --
index 6aa0649..7571578 100644 (file)
@@ -96,6 +96,7 @@ $(declareSADT "DiskType"
   , ("LD_FILE",     'C.ldFile)
   , ("LD_BLOCKDEV", 'C.ldBlockdev)
   , ("LD_RADOS",    'C.ldRbd)
+  , ("LD_EXT",      'C.ldExt)
   ])
 $(makeJSONInstance ''DiskType)
 
@@ -127,6 +128,7 @@ data DiskLogicalId
   | LIDFile FileDriver String -- ^ Driver, path
   | LIDBlockDev BlockDriver String -- ^ Driver, path (must be under /dev)
   | LIDRados String String -- ^ Unused, path
+  | LIDExt String String -- ^ ExtProvider, unique name
     deriving (Read, Show, Eq)
 
 -- | Mapping from a logical id to a disk type.
@@ -136,6 +138,7 @@ lidDiskType (LIDDrbd8 {}) = LD_DRBD8
 lidDiskType (LIDFile  {}) = LD_FILE
 lidDiskType (LIDBlockDev {}) = LD_BLOCKDEV
 lidDiskType (LIDRados {}) = LD_RADOS
+lidDiskType (LIDExt {}) = LD_EXT
 
 -- | Builds the extra disk_type field for a given logical id.
 lidEncodeType :: DiskLogicalId -> [(String, JSValue)]
@@ -150,6 +153,7 @@ encodeDLId (LIDDrbd8 nodeA nodeB port minorA minorB key) =
 encodeDLId (LIDRados pool name) = JSArray [showJSON pool, showJSON name]
 encodeDLId (LIDFile driver name) = JSArray [showJSON driver, showJSON name]
 encodeDLId (LIDBlockDev driver name) = JSArray [showJSON driver, showJSON name]
+encodeDLId (LIDExt extprovider name) =
+  JSArray [showJSON extprovider, showJSON name]
 
 -- | Custom encoder for DiskLogicalId, composing both the logical id
 -- and the extra disk_type field.
@@ -201,6 +205,13 @@ decodeDLId obj lid = do
           path'   <- readJSON path
           return $ LIDRados driver' path'
         _ -> fail $ "Can't read logical_id for rdb type"
+    LD_EXT ->
+      case lid of
+        JSArray [extprovider, name] -> do
+          extprovider' <- readJSON extprovider
+          name'   <- readJSON name
+          return $ LIDExt extprovider' name'
+        _ -> fail $ "Can't read logical_id for extstorage type"
 
 -- | Disk data structure.
 --
@@ -235,6 +246,7 @@ $(declareSADT "DiskTemplate"
   , ("DTPlain",      'C.dtPlain)
   , ("DTBlock",      'C.dtBlock)
   , ("DTDrbd8",      'C.dtDrbd8)
+  , ("DTExt",        'C.dtExt)
   ])
 $(makeJSONInstance ''DiskTemplate)
 
index 51a06bb..a7c2b23 100644 (file)
@@ -1519,6 +1519,34 @@ def GetMigrationStatus(instance):
   except Exception, err:  # pylint: disable=W0703
     _Fail("Failed to get migration status: %s", err, exc=True)
 
+def HotAddDisk(instance, disk, dev_path, seq):
+  """Hot add a nic
+
+  """
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  return hyper.HotAddDisk(instance, disk, dev_path, seq)
+
+def HotDelDisk(instance, disk, seq):
+  """Hot add a nic
+
+  """
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  return hyper.HotDelDisk(instance, disk, seq)
+
+def HotAddNic(instance, nic, seq):
+  """Hot add a nic
+
+  """
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  return hyper.HotAddNic(instance, nic, seq)
+
+def HotDelNic(instance, nic, seq):
+  """Hot add a nic
+
+  """
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  return hyper.HotDelNic(instance, nic, seq)
+
 
 def BlockdevCreate(disk, size, owner, on_primary, info):
   """Creates a block device for an instance.
@@ -2431,6 +2459,8 @@ def OSEnvironment(instance, inst_os, debug=0):
       result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
     if nic.nicparams[constants.NIC_LINK]:
       result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
+    if nic.network:
+      result["NIC_%d_NETWORK" % idx] = nic.network
     if constants.HV_NIC_TYPE in instance.hvparams:
       result["NIC_%d_FRONTEND_TYPE" % idx] = \
         instance.hvparams[constants.HV_NIC_TYPE]
@@ -2443,6 +2473,51 @@ def OSEnvironment(instance, inst_os, debug=0):
   return result
 
 
+def DiagnoseExtStorage(top_dirs=None):
+  """Compute the validity for all ExtStorage Providers.
+
+  @type top_dirs: list
+  @param top_dirs: the list of directories in which to
+      search (if not given defaults to
+      L{constants.ES_SEARCH_PATH})
+  @rtype: list of L{objects.ExtStorage}
+  @return: a list of tuples (name, path, status, diagnose, parameters)
+      for all (potential) ExtStorage Providers under all
+      search paths, where:
+          - name is the (potential) ExtStorage Provider
+          - path is the full path to the ExtStorage Provider
+          - status True/False is the validity of the ExtStorage Provider
+          - diagnose is the error message for an invalid ExtStorage Provider,
+            otherwise empty
+          - parameters is a list of (name, help) parameters, if any
+
+  """
+  if top_dirs is None:
+    top_dirs = constants.ES_SEARCH_PATH
+
+  result = []
+  for dir_name in top_dirs:
+    if os.path.isdir(dir_name):
+      try:
+        f_names = utils.ListVisibleFiles(dir_name)
+      except EnvironmentError, err:
+        logging.exception("Can't list the ExtStorage directory %s: %s",
+                          dir_name, err)
+        break
+      for name in f_names:
+        es_path = utils.PathJoin(dir_name, name)
+        status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
+        if status:
+          diagnose = ""
+          parameters = es_inst.supported_parameters
+        else:
+          diagnose = es_inst
+          parameters = []
+        result.append((name, es_path, status, diagnose, parameters))
+
+  return result
+
+
 def BlockdevGrow(disk, amount, dryrun):
   """Grow a stack of block devices.
 
index 1d85867..40c75c3 100644 (file)
@@ -2637,11 +2637,342 @@ class RADOSBlockDevice(BlockDev):
                   result.fail_reason, result.output)
 
 
+class ExtStorageDevice(BlockDev):
+  """A block device provided by an ExtStorage Provider.
+
+  This class implements the External Storage Interface, which means
+  handling of the externally provided block devices.
+
+  """
+  def __init__(self, unique_id, children, size, params):
+    """Attaches to an extstorage block device.
+
+    """
+    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
+    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
+      raise ValueError("Invalid configuration data %s" % str(unique_id))
+
+    self.driver, self.vol_name = unique_id
+    self.ext_params = params
+
+    self.major = self.minor = None
+    self.Attach()
+
+  @classmethod
+  def Create(cls, unique_id, children, size, params):
+    """Create a new extstorage device.
+
+    Provision a new volume using an extstorage provider, which will
+    then be mapped to a block device.
+
+    """
+    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
+      raise errors.ProgrammerError("Invalid configuration data %s" %
+                                   str(unique_id))
+    ext_params = params
+
+    # Call the External Storage's create script,
+    # to provision a new Volume inside the External Storage
+    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
+                      ext_params, str(size))
+
+    return ExtStorageDevice(unique_id, children, size, params)
+
+  def Remove(self):
+    """Remove the extstorage device.
+
+    """
+    if not self.minor and not self.Attach():
+      # The extstorage device doesn't exist.
+      return
+
+    # First shutdown the device (remove mappings).
+    self.Shutdown()
+
+    # Call the External Storage's remove script,
+    # to remove the Volume from the External Storage
+    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
+                      self.ext_params)
+
+  def Rename(self, new_id):
+    """Rename this device.
+
+    """
+    pass
+
+  def Attach(self):
+    """Attach to an existing extstorage device.
+
+    This method maps the extstorage volume that matches our name with
+    a corresponding block device and then attaches to this device.
+
+    """
+    self.attached = False
+
+    # Call the External Storage's attach script,
+    # to attach an existing Volume to a block device under /dev
+    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
+                                      self.unique_id, self.ext_params)
+
+    try:
+      st = os.stat(self.dev_path)
+    except OSError, err:
+      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
+      return False
+
+    if not stat.S_ISBLK(st.st_mode):
+      logging.error("%s is not a block device", self.dev_path)
+      return False
+
+    self.major = os.major(st.st_rdev)
+    self.minor = os.minor(st.st_rdev)
+    self.attached = True
+
+    return True
+
+  def Assemble(self):
+    """Assemble the device.
+
+    """
+    pass
+
+  def Shutdown(self):
+    """Shutdown the device.
+
+    """
+    if not self.minor and not self.Attach():
+      # The extstorage device doesn't exist.
+      return
+
+    # Call the External Storage's detach script,
+    # to detach an existing Volume from its block device under /dev
+    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
+                      self.ext_params)
+
+    self.minor = None
+    self.dev_path = None
+
+  def Open(self, force=False):
+    """Make the device ready for I/O.
+
+    """
+    pass
+
+  def Close(self):
+    """Notifies that the device will no longer be used for I/O.
+
+    """
+    pass
+
+  def Grow(self, amount, dryrun):
+    """Grow the Volume.
+
+    @type amount: integer
+    @param amount: the amount (in mebibytes) to grow with
+    @type dryrun: boolean
+    @param dryrun: whether to execute the operation in simulation mode
+        only, without actually increasing the size
+
+    """
+    if not self.Attach():
+      _ThrowError("Can't attach to extstorage device during Grow()")
+
+    if dryrun:
+      # we do not support dry runs of resize operations for now.
+      return
+
+    new_size = self.size + amount
+
+    # Call the External Storage's grow script,
+    # to grow an existing Volume inside the External Storage
+    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
+                      self.ext_params, str(self.size), grow=str(new_size))
+
+
+def _ExtStorageAction(action, unique_id, ext_params, size=None, grow=None):
+  """Take an External Storage action.
+
+  Take an External Storage action concerning or affecting
+  a specific Volume inside the External Storage.
+
+  @type action: string
+  @param action: which action to perform. One of:
+                 create / remove / grow / attach / detach
+  @type unique_id: tuple (driver, vol_name)
+  @param unique_id: a tuple containing the type of ExtStorage (driver)
+                    and the Volume name
+  @type ext_params: dict
+  @param ext_params: the ExtStorage parameters
+  @type size: integer
+  @param size: the size of the Volume in mebibytes
+  @type grow: integer
+  @param grow: the new size in mebibytes (after grow)
+  @rtype: None or a block device path (during attach)
+
+  """
+  driver, vol_name = unique_id
+
+  # Create an External Storage instance of type `driver'
+  status, inst_es = ExtStorageFromDisk(driver)
+  if not status:
+    _ThrowError("%s" % inst_es)
+
+  # Create the basic environment for the driver's scripts
+  create_env = _ExtStorageEnvironment(unique_id, ext_params, size, grow)
+
+  # Do not use log file for action `attach' as we need
+  # to get the output from RunResult
+  # TODO: find a way to have a log file for attach too
+  logfile = None
+  if action != constants.ES_ACTION_ATTACH:
+    logfile = _VolumeLogName(action, driver, vol_name)
+
+  # Find out which external script to run according to the given action
+  script_name = action + "_script"
+  script = getattr(inst_es, script_name)
+
+  # Run the external script
+  result = utils.RunCmd([script], env=create_env,
+                        cwd=inst_es.path, output=logfile)
+  if result.failed:
+    logging.error("External storage's %s command '%s' returned"
+                  " error: %s, logfile: %s, output: %s",
+                  action, result.cmd, result.fail_reason,
+                  logfile, result.output)
+    # the `attach' action has no log file (see above)
+    lines = []
+    if logfile is not None:
+      lines = [utils.SafeEncode(val)
+               for val in utils.TailFile(logfile, lines=20)]
+    _ThrowError("External storage's %s script failed (%s), last"
+                " lines in the log file:\n%s",
+                action, result.fail_reason, "\n".join(lines))
+
+  if action == constants.ES_ACTION_ATTACH:
+    return result.stdout
+
+
+def ExtStorageFromDisk(name, base_dir=None):
+  """Create an ExtStorage instance from disk.
+
+  This function will return an ExtStorage instance
+  if the given name is a valid ExtStorage name.
+
+  @type base_dir: string
+  @keyword base_dir: Base directory containing ExtStorage installations.
+                     Defaults to a search in all the ES_SEARCH_PATH dirs.
+  @rtype: tuple
+  @return: True and the ExtStorage instance if we find a valid one, or
+      False and the diagnose message on error
+
+  """
+  if base_dir is None:
+    es_dir = utils.FindFile(name, constants.ES_SEARCH_PATH, os.path.isdir)
+  else:
+    es_dir = utils.FindFile(name, [base_dir], os.path.isdir)
+
+  if es_dir is None:
+    return False, ("Directory for External Storage Provider %s not"
+                   " found in search path" % name)
+
+  # ES Files dictionary: we will populate it with the absolute path
+  # names of the provider's scripts and its parameters file, all of
+  # which are required
+  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
+
+  es_files[constants.ES_PARAMETERS_FILE] = True
+
+  for (filename, required) in es_files.items():
+    es_files[filename] = utils.PathJoin(es_dir, filename)
+
+    try:
+      st = os.stat(es_files[filename])
+    except EnvironmentError, err:
+      return False, ("File '%s' under path '%s' is missing (%s)" %
+                     (filename, es_dir, utils.ErrnoOrStr(err)))
+
+    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
+      return False, ("File '%s' under path '%s' is not a regular file" %
+                     (filename, es_dir))
+
+    if filename in constants.ES_SCRIPTS:
+      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
+        return False, ("File '%s' under path '%s' is not executable" %
+                       (filename, es_dir))
+
+  parameters = []
+  if constants.ES_PARAMETERS_FILE in es_files:
+    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
+    try:
+      parameters = utils.ReadFile(parameters_file).splitlines()
+    except EnvironmentError, err:
+      return False, ("Error while reading the EXT parameters file at %s: %s" %
+                     (parameters_file, utils.ErrnoOrStr(err)))
+    parameters = [v.split(None, 1) for v in parameters]
+
+  es_obj = \
+    objects.ExtStorage(name=name, path=es_dir,
+                       create_script=es_files[constants.ES_SCRIPT_CREATE],
+                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
+                       grow_script=es_files[constants.ES_SCRIPT_GROW],
+                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
+                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
+                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
+                       supported_parameters=parameters)
+  return True, es_obj
+
+
+def _ExtStorageEnvironment(unique_id, ext_params, size=None, grow=None):
+  """Calculate the environment for an External Storage script.
+
+  @type unique_id: tuple (driver, vol_name)
+  @param unique_id: ExtStorage pool and name of the Volume
+  @type ext_params: dict
+  @param ext_params: the EXT parameters
+  @type size: integer
+  @param size: size of the Volume in mebibytes
+  @type grow: integer
+  @param grow: the new size of the Volume in mebibytes (after grow)
+  @rtype: dict
+  @return: dict of environment variables
+
+  """
+  vol_name = unique_id[1]
+
+  result = {}
+  result['VOL_NAME'] = vol_name
+
+  # EXT params
+  for pname, pvalue in ext_params.items():
+    result["EXTP_%s" % pname.upper()] = str(pvalue)
+
+  if size is not None:
+    result['VOL_SIZE'] = size
+
+  if grow is not None:
+    result['VOL_NEW_SIZE'] = grow
+
+  return result
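+
+# Example (sketch): for unique_id = ("sample_provider1", "vol-1234"),
+# ext_params = {"param1": "value1"} and size = "1024" (callers pass the
+# size as a string), the above returns:
+#   {"VOL_NAME": "vol-1234", "EXTP_PARAM1": "value1", "VOL_SIZE": "1024"}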
+
+
+def _VolumeLogName(kind, es_name, volume):
+  """Compute the ExtStorage log filename for a given Volume and operation.
+
+  @type kind: string
+  @param kind: the operation type (e.g. create, remove etc.)
+  @type es_name: string
+  @param es_name: the ExtStorage name
+  @type volume: string
+  @param volume: the name of the Volume inside the External Storage
+
+  """
+  # TODO: Use tempfile.mkstemp to create unique filename
+  base = ("%s-%s-%s-%s.log" %
+          (kind, es_name, volume, utils.TimestampForFilename()))
+  return utils.PathJoin(constants.LOG_ES_DIR, base)
+
+
 DEV_MAP = {
   constants.LD_LV: LogicalVolume,
   constants.LD_DRBD8: DRBD8,
   constants.LD_BLOCKDEV: PersistentBlockDevice,
   constants.LD_RBD: RADOSBlockDevice,
+  constants.LD_EXT: ExtStorageDevice,
   }
 
 if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
index 440f568..c5d9e5e 100644 (file)
@@ -600,6 +600,7 @@ def InitConfig(version, cluster_config, master_node_config,
                                    nodegroups=nodegroups,
                                    nodes=nodes,
                                    instances={},
+                                   networks={},
                                    serial_no=1,
                                    ctime=now, mtime=now)
   utils.WriteFile(cfg_file,
index feb0fe4..67072b3 100644 (file)
@@ -53,6 +53,7 @@ __all__ = [
   # Command line options
   "ABSOLUTE_OPT",
   "ADD_UIDS_OPT",
+  "ADD_RESERVED_IPS_OPT",
   "ALLOCATABLE_OPT",
   "ALLOC_POLICY_OPT",
   "ALL_OPT",
@@ -86,9 +87,12 @@ __all__ = [
   "FORCE_FILTER_OPT",
   "FORCE_OPT",
   "FORCE_VARIANT_OPT",
+  "GATEWAY_OPT",
+  "GATEWAY6_OPT",
   "GLOBAL_FILEDIR_OPT",
   "HID_OS_OPT",
   "GLOBAL_SHARED_FILEDIR_OPT",
+  "HOTPLUG_OPT",
   "HVLIST_OPT",
   "HVOPTS_OPT",
   "HYPERVISOR_OPT",
@@ -110,6 +114,9 @@ __all__ = [
   "MC_OPT",
   "MIGRATION_MODE_OPT",
   "NET_OPT",
+  "NETWORK_OPT",
+  "NETWORK6_OPT",
+  "NETWORK_TYPE_OPT",
   "NEW_CLUSTER_CERT_OPT",
   "NEW_CLUSTER_DOMAIN_SECRET_OPT",
   "NEW_CONFD_HMAC_KEY_OPT",
@@ -117,6 +124,7 @@ __all__ = [
   "NEW_SECONDARY_OPT",
   "NEW_SPICE_CERT_OPT",
   "NIC_PARAMS_OPT",
+  "NOCONFLICTSCHECK_OPT",
   "NODE_FORCE_JOIN_OPT",
   "NODE_LIST_OPT",
   "NODE_PLACEMENT_OPT",
@@ -159,6 +167,7 @@ __all__ = [
   "READD_OPT",
   "REBOOT_TYPE_OPT",
   "REMOVE_INSTANCE_OPT",
+  "REMOVE_RESERVED_IPS_OPT",
   "REMOVE_UIDS_OPT",
   "RESERVED_LVS_OPT",
   "RUNTIME_MEM_OPT",
@@ -200,6 +209,7 @@ __all__ = [
   "HV_STATE_OPT",
   "IGNORE_IPOLICY_OPT",
   "INSTANCE_POLICY_OPTS",
+  "ALLOW_ARBITPARAMS_OPT",
   # Generic functions for CLI programs
   "ConfirmOperation",
   "CreateIPolicyFromOpts",
@@ -233,11 +243,13 @@ __all__ = [
   "ARGS_MANY_INSTANCES",
   "ARGS_MANY_NODES",
   "ARGS_MANY_GROUPS",
+  "ARGS_MANY_NETWORKS",
   "ARGS_NONE",
   "ARGS_ONE_INSTANCE",
   "ARGS_ONE_NODE",
   "ARGS_ONE_GROUP",
   "ARGS_ONE_OS",
+  "ARGS_ONE_NETWORK",
   "ArgChoice",
   "ArgCommand",
   "ArgFile",
@@ -245,8 +257,10 @@ __all__ = [
   "ArgHost",
   "ArgInstance",
   "ArgJobId",
+  "ArgNetwork",
   "ArgNode",
   "ArgOs",
+  "ArgExtStorage",
   "ArgSuggest",
   "ArgUnknown",
   "OPT_COMPL_INST_ADD_NODES",
@@ -255,7 +269,9 @@ __all__ = [
   "OPT_COMPL_ONE_INSTANCE",
   "OPT_COMPL_ONE_NODE",
   "OPT_COMPL_ONE_NODEGROUP",
+  "OPT_COMPL_ONE_NETWORK",
   "OPT_COMPL_ONE_OS",
+  "OPT_COMPL_ONE_EXTSTORAGE",
   "cli_option",
   "SplitNodeOption",
   "CalculateOSNames",
@@ -353,6 +369,11 @@ class ArgNode(_Argument):
   """
 
 
+class ArgNetwork(_Argument):
+  """Network argument.
+
+  """
+
+
 class ArgGroup(_Argument):
   """Node group argument.
 
@@ -389,11 +410,19 @@ class ArgOs(_Argument):
   """
 
 
+class ArgExtStorage(_Argument):
+  """ExtStorage argument.
+
+  """
+
+
 ARGS_NONE = []
 ARGS_MANY_INSTANCES = [ArgInstance()]
+ARGS_MANY_NETWORKS = [ArgNetwork()]
 ARGS_MANY_NODES = [ArgNode()]
 ARGS_MANY_GROUPS = [ArgGroup()]
 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
+ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
 # TODO
 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
@@ -413,6 +442,7 @@ def _ExtractTagsObject(opts, args):
     retval = kind, kind
   elif kind in (constants.TAG_NODEGROUP,
                 constants.TAG_NODE,
+                constants.TAG_NETWORK,
                 constants.TAG_INSTANCE):
     if not args:
       raise errors.OpPrereqError("no arguments passed to the command")
@@ -635,16 +665,20 @@ def check_maybefloat(option, opt, value): # pylint: disable=W0613
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
+ OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
+ OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
- OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
+ OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
 
 OPT_COMPL_ALL = frozenset([
   OPT_COMPL_MANY_NODES,
   OPT_COMPL_ONE_NODE,
   OPT_COMPL_ONE_INSTANCE,
   OPT_COMPL_ONE_OS,
+  OPT_COMPL_ONE_EXTSTORAGE,
   OPT_COMPL_ONE_IALLOCATOR,
+  OPT_COMPL_ONE_NETWORK,
   OPT_COMPL_INST_ADD_NODES,
   OPT_COMPL_ONE_NODEGROUP,
   ])
@@ -1431,6 +1465,55 @@ ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                           help="Marks the grow as absolute instead of the"
                           " (default) relative mode")
 
+NETWORK_OPT = cli_option("--network",
+                         action="store", default=None, dest="network",
+                         help="IP network in CIDR notation")
+
+GATEWAY_OPT = cli_option("--gateway",
+                         action="store", default=None, dest="gateway",
+                         help="IP address of the router (gateway)")
+
+ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
+                                  action="store", default=None,
+                                  dest="add_reserved_ips",
+                                  help="Comma-separated list of"
+                                  " reserved IPs to add")
+
+REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
+                                     action="store", default=None,
+                                     dest="remove_reserved_ips",
+                                     help="Comma-delimited list of"
+                                     " reserved IPs to remove")
+
+NETWORK_TYPE_OPT = cli_option("--network-type",
+                              action="store", default=None, dest="network_type",
+                              help="Network type: private, public, None")
+
+NETWORK6_OPT = cli_option("--network6",
+                          action="store", default=None, dest="network6",
+                          help="IP network in CIDR notation")
+
+GATEWAY6_OPT = cli_option("--gateway6",
+                          action="store", default=None, dest="gateway6",
+                          help="IP6 address of the router (gateway)")
+
+NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
+                                  dest="conflicts_check",
+                                  default=True,
+                                  action="store_false",
+                                  help="Don't check for conflicting IPs")
+
+HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
+                         action="store_true", default=False,
+                         help="Enable disk/nic hotplug")
+
+ALLOW_ARBITPARAMS_OPT = cli_option("--allow-arbit-params",
+                                   dest="allow_arbit_params",
+                                   action="store_true", default=None,
+                                   help="Allow arbitrary parameters"
+                                   " to be passed to --disk(s)"
+                                   " option (used by ExtStorage)")
+
 #: Options provided by all commands
 COMMON_OPTS = [DEBUG_OPT]
 
@@ -1447,6 +1530,7 @@ COMMON_CREATE_OPTS = [
   NET_OPT,
   NODE_PLACEMENT_OPT,
   NOIPCHECK_OPT,
+  NOCONFLICTSCHECK_OPT,
   NONAMECHECK_OPT,
   NONICS_OPT,
   NWSYNC_OPT,
@@ -2390,10 +2474,16 @@ def GenericInstanceCreate(mode, opts, args):
   else:
     raise errors.ProgrammerError("Invalid creation mode %s" % mode)
 
+  hotplug = opts.hotplug
+
   op = opcodes.OpInstanceCreate(instance_name=instance,
                                 disks=disks,
                                 disk_template=opts.disk_template,
                                 nics=nics,
+                                conflicts_check=opts.conflicts_check,
                                 pnode=pnode, snode=snode,
                                 ip_check=opts.ip_check,
                                 name_check=opts.name_check,
@@ -2412,6 +2502,7 @@ def GenericInstanceCreate(mode, opts, args):
                                 src_node=src_node,
                                 src_path=src_path,
                                 tags=tags,
+                                hotplug=hotplug,
                                 no_install=no_install,
                                 identify_defaults=identify_defaults,
                                 ignore_ipolicy=opts.ignore_ipolicy)
index de5bfb7..59e8608 100644 (file)
@@ -453,6 +453,8 @@ def ShowClusterConfig(opts, args):
   ToStdout("  - primary ip version: %d", result["primary_ip_version"])
   ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
   ToStdout("  - OS search path: %s", utils.CommaJoin(constants.OS_SEARCH_PATH))
+  ToStdout("  - ExtStorage Providers search path: %s",
+           utils.CommaJoin(constants.ES_SEARCH_PATH))
 
   ToStdout("Default node parameters:")
   _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)
index c59817b..162c77a 100644 (file)
@@ -218,6 +218,7 @@ def ListInstances(opts, args):
 
   fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
                                "nic.modes", "nic.links", "nic.bridges",
+                               "nic.networks",
                                "snodes", "snodes.group", "snodes.group.uuid"],
                               (lambda value: ",".join(str(item)
                                                       for item in value),
@@ -1257,9 +1258,10 @@ def ShowInstanceConfig(opts, args):
     FormatParameterDict(buf, instance["be_instance"], be_actual, level=2)
     # TODO(ganeti 2.7) rework the NICs as well
     buf.write("    - NICs:\n")
-    for idx, (ip, mac, mode, link) in enumerate(instance["nics"]):
-      buf.write("      - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" %
-                (idx, mac, ip, mode, link))
+    for idx, (ip, mac, mode, link, network, _) in enumerate(instance["nics"]):
+      buf.write("      - nic/%d: MAC: %s, IP: %s,"
+                " mode: %s, link: %s, network: %s\n" %
+                (idx, mac, ip, mode, link, network))
     buf.write("  Disk template: %s\n" % instance["disk_template"])
     buf.write("  Disks:\n")
 
@@ -1403,9 +1405,15 @@ def SetInstanceParams(opts, args):
   else:
     offline = None
 
+  hotplug = opts.hotplug
+
   op = opcodes.OpInstanceSetParams(instance_name=args[0],
                                    nics=nics,
                                    disks=disks,
+                                   hotplug=hotplug,
                                    disk_template=opts.disk_template,
                                    remote_node=opts.node,
                                    hvparams=opts.hvparams,
@@ -1417,7 +1425,9 @@ def SetInstanceParams(opts, args):
                                    force=opts.force,
                                    wait_for_sync=opts.wait_for_sync,
                                    offline=offline,
-                                   ignore_ipolicy=opts.ignore_ipolicy)
+                                   conflicts_check=opts.conflicts_check,
+                                   ignore_ipolicy=opts.ignore_ipolicy,
+                                   allow_arbit_params=opts.allow_arbit_params)
 
   # even if here we process the result, we allow submit only
   result = SubmitOrSend(op, opts)
@@ -1517,6 +1527,7 @@ add_opts = [
   FORCE_VARIANT_OPT,
   NO_INSTALL_OPT,
   IGNORE_IPOLICY_OPT,
+  HOTPLUG_OPT,
   ]
 
 commands = {
@@ -1603,7 +1614,8 @@ commands = {
     [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT,
      DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
      OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
-     ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT],
+     ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
+     NOCONFLICTSCHECK_OPT, HOTPLUG_OPT, ALLOW_ARBITPARAMS_OPT],
     "<instance>", "Alters the parameters of an instance"),
   "shutdown": (
     GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
diff --git a/lib/client/gnt_network.py b/lib/client/gnt_network.py
new file mode 100644 (file)
index 0000000..207873c
--- /dev/null
@@ -0,0 +1,347 @@
+#
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+"""IP pool related commands"""
+
+# pylint: disable-msg=W0401,W0614
+# W0401: Wildcard import ganeti.cli
+# W0614: Unused import %s from wildcard import (since we need cli)
+
+from ganeti.cli import *
+from ganeti import constants
+from ganeti import opcodes
+from ganeti import utils
+from textwrap import wrap
+
+
+#: default list of fields for L{ListNetworks}
+_LIST_DEF_FIELDS = ["name", "network", "gateway",
+                    "network_type", "mac_prefix", "group_list", "tags"]
+
+
+def _HandleReservedIPs(ips):
+  if ips is not None:
+    if ips == "":
+      return []
+    else:
+      return utils.UnescapeAndSplit(ips, sep=",")
+  return None
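+
+# Example (sketch): _HandleReservedIPs("10.0.0.10,10.0.0.11") returns
+# ["10.0.0.10", "10.0.0.11"]; "" returns [] and None is passed through.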
+
+def AddNetwork(opts, args):
+  """Add a network to the cluster.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: a list of length 1 with the network name to create
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  (network_name, ) = args
+
+  if opts.tags is not None:
+    tags = opts.tags.split(",")
+  else:
+    tags = []
+
+  op = opcodes.OpNetworkAdd(network_name=network_name,
+                            gateway=opts.gateway,
+                            network=opts.network,
+                            gateway6=opts.gateway6,
+                            network6=opts.network6,
+                            mac_prefix=opts.mac_prefix,
+                            network_type=opts.network_type,
+                            add_reserved_ips=_HandleReservedIPs(
+                              opts.add_reserved_ips),
+                            tags=tags)
+  SubmitOpCode(op, opts=opts)
+
+
+def MapNetwork(opts, args):
+  """Map a network to a node group.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: a list of length 4 with network, nodegroup, mode, link
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  network = args[0]
+  groups = args[1]
+  mode = args[2]
+  link = args[3]
+
+  #TODO: allow comma separated group names
+  if groups == 'all':
+    cl = GetClient()
+    (groups, ) = cl.QueryGroups([], ['name'], False)
+  else:
+    groups = [groups]
+
+  for group in groups:
+    op = opcodes.OpNetworkConnect(group_name=group,
+                                  network_name=network,
+                                  network_mode=mode,
+                                  network_link=link,
+                                  conflicts_check=opts.conflicts_check)
+    SubmitOpCode(op, opts=opts)
+
+
+def UnmapNetwork(opts, args):
+  """Unmap a network from a node group.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: a list of length 2 with network, nodegroup
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  network = args[0]
+  groups = args[1]
+
+  #TODO: allow comma separated group names
+  if groups == 'all':
+    cl = GetClient()
+    (groups, ) = cl.QueryGroups([], ['name'], False)
+  else:
+    groups = [groups]
+
+  for group in groups:
+    op = opcodes.OpNetworkDisconnect(group_name=group,
+                                     network_name=network,
+                                     conflicts_check=opts.conflicts_check)
+    SubmitOpCode(op, opts=opts)
+
+
+def ListNetworks(opts, args):
+  """List Ip pools and their properties.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: networks to list, or empty for all
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  desired_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
+  fmtoverride = {
+    "group_list": (",".join, False),
+    "inst_list": (",".join, False),
+    "tags": (",".join, False),
+  }
+
+  return GenericList(constants.QR_NETWORK, desired_fields, args, None,
+                     opts.separator, not opts.no_headers,
+                     verbose=opts.verbose, format_override=fmtoverride)
+
+
+def ListNetworkFields(opts, args):
+  """List network fields.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: fields to list, or empty for all
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  return GenericListFields(constants.QR_NETWORK, args, opts.separator,
+                           not opts.no_headers)
+
+
+def ShowNetworkConfig(opts, args):
+  """Show network information.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: should either be an empty list, in which case
+      we show information about all networks, or should contain
+      a list of networks (names or UUIDs) to be queried for information
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  cl = GetClient()
+  result = cl.QueryNetworks(fields=["name", "network", "gateway",
+                                    "network6", "gateway6",
+                                    "mac_prefix", "network_type",
+                                    "free_count", "reserved_count",
+                                    "map", "group_list", "inst_list",
+                                    "external_reservations"],
+                            names=args, use_locking=False)
+
+  for (name, network, gateway, network6, gateway6,
+       mac_prefix, network_type, free_count, reserved_count,
+       map, group_list, instances, ext_res) in result:
+    size = free_count + reserved_count
+    ToStdout("Network name: %s", name)
+    ToStdout("  subnet: %s", network)
+    ToStdout("  gateway: %s", gateway)
+    ToStdout("  subnet6: %s", network6)
+    ToStdout("  gateway6: %s", gateway6)
+    ToStdout("  mac prefix: %s", mac_prefix)
+    ToStdout("  type: %s", network_type)
+    ToStdout("  size: %d", size)
+    ToStdout("  free: %d (%.2f%%)", free_count,
+             100 * float(free_count)/float(size))
+    ToStdout("  usage map:")
+    idx = 0
+    for line in wrap(map, width=64):
+      ToStdout("     %s %s %d", str(idx).rjust(3), line.ljust(64), idx + 63)
+      idx += 64
+    ToStdout("         (X) used    (.) free")
+
+    if ext_res:
+      ToStdout("  externally reserved IPs:")
+      for line in wrap(ext_res, width=64):
+        ToStdout("    %s" % line)
+
+    if group_list:
+      ToStdout("  connected to node groups:")
+      for group in group_list:
+        ToStdout("    %s", group)
+    else:
+      ToStdout("  not connected to any node group")
+
+    if instances:
+      ToStdout("  used by %d instances:", len(instances))
+      for inst in instances:
+        ((ips, networks), ) = cl.QueryInstances([inst],
+                                                ["nic.ips", "nic.networks"],
+                                                use_locking=False)
+
+        l = lambda value: ", ".join("%s:%s" % (idx, ip)
+                                    for idx, (ip, net) in enumerate(value)
+                                    if net == name)
+
+        ToStdout("    %s : %s", inst, l(zip(ips, networks)))
+    else:
+      ToStdout("  not used by any instances")
+
+
+def SetNetworkParams(opts, args):
+  """Modifies an IP address pool's parameters.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: should contain only one element, the network name
+
+  @rtype: int
+  @return: the desired exit code
+
+  """
+
+  # TODO: add "network": opts.network,
+  all_changes = {
+    "gateway": opts.gateway,
+    "add_reserved_ips": _HandleReservedIPs(opts.add_reserved_ips),
+    "remove_reserved_ips": _HandleReservedIPs(opts.remove_reserved_ips),
+    "mac_prefix": opts.mac_prefix,
+    "network_type": opts.network_type,
+    "gateway6": opts.gateway6,
+    "network6": opts.network6,
+  }
+
+  if all_changes.values().count(None) == len(all_changes):
+    ToStderr("Please give at least one of the parameters.")
+    return 1
+
+  op = opcodes.OpNetworkSetParams(network_name=args[0],
+                                  # pylint: disable-msg=W0142
+                                  **all_changes)
+
+  # TODO: add feedback to user, e.g. list the modifications
+  SubmitOrSend(op, opts)
+
+
+def RemoveNetwork(opts, args):
+  """Remove an IP address pool from the cluster.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: a list of length 1 with the id of the IP address pool to remove
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  (network_name,) = args
+  op = opcodes.OpNetworkRemove(network_name=network_name, force=opts.force)
+  SubmitOpCode(op, opts=opts)
+
+
+commands = {
+  "add": (
+    AddNetwork, ARGS_ONE_NETWORK,
+    [DRY_RUN_OPT, NETWORK_OPT, GATEWAY_OPT, ADD_RESERVED_IPS_OPT, TAG_ADD_OPT,
+     MAC_PREFIX_OPT, NETWORK_TYPE_OPT, NETWORK6_OPT, GATEWAY6_OPT],
+    "<network_name>", "Add a new IP network to the cluster"),
+  "list": (
+    ListNetworks, ARGS_MANY_NETWORKS,
+    [NOHDR_OPT, SEP_OPT, FIELDS_OPT, VERBOSE_OPT],
+    "[<network_id>...]",
+    "Lists the IP networks in the cluster. The available fields can be shown"
+    " using the \"list-fields\" command (see the man page for details)."
+    " The default list is (in order): %s." % utils.CommaJoin(_LIST_DEF_FIELDS)),
+  "list-fields": (
+    ListNetworkFields, [ArgUnknown()], [NOHDR_OPT, SEP_OPT], "[fields...]",
+    "Lists all available fields for networks"),
+  "info": (
+    ShowNetworkConfig, ARGS_MANY_NETWORKS, [],
+    "[<network_name>...]", "Show information about the network(s)"),
+  "modify": (
+    SetNetworkParams, ARGS_ONE_NETWORK,
+    [DRY_RUN_OPT, SUBMIT_OPT, ADD_RESERVED_IPS_OPT, REMOVE_RESERVED_IPS_OPT,
+     GATEWAY_OPT, MAC_PREFIX_OPT, NETWORK_TYPE_OPT, NETWORK6_OPT, GATEWAY6_OPT],
+    "<network_name>", "Alters the parameters of a network"),
+  "connect": (
+    MapNetwork,
+    [ArgNetwork(min=1, max=1), ArgGroup(min=1, max=1),
+     ArgUnknown(min=1, max=1), ArgUnknown(min=1, max=1)],
+    [NOCONFLICTSCHECK_OPT],
+    "<network_name> <node_group> <mode> <link>",
+    "Map a given network to the specified node group"
+    " with given mode and link (netparams)"),
+  "disconnect": (
+    UnmapNetwork,
+    [ArgNetwork(min=1, max=1), ArgGroup(min=1, max=1)],
+    [NOCONFLICTSCHECK_OPT],
+    "<network_name> <node_group>",
+    "Unmap a given network from a specified node group"),
+  "remove": (
+    RemoveNetwork, ARGS_ONE_NETWORK, [FORCE_OPT, DRY_RUN_OPT],
+    "[--dry-run] <network_id>",
+    "Remove an (empty) network from the cluster"),
+  "list-tags": (
+    ListTags, ARGS_ONE_NETWORK, [],
+    "<network_name>", "List the tags of the given network"),
+  "add-tags": (
+    AddTags, [ArgNetwork(min=1, max=1), ArgUnknown()],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
+    "<network_name> tag...", "Add tags to the given network"),
+  "remove-tags": (
+    RemoveTags, [ArgNetwork(min=1, max=1), ArgUnknown()],
+    [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
+    "<network_name> tag...", "Remove tags from given network"),
+}
+
+
+def Main():
+  return GenericMain(commands, override={"tag_type": constants.TAG_NETWORK})
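Reviewer's note: the commands table above follows the usual gnt-* client convention, mapping each subcommand to a (handler, argument spec, option list, usage, description) tuple that GenericMain dispatches on. A rough, hypothetical sketch of that dispatch, not the real ganeti.cli implementation:

import sys

def _Demo(opts, args):
  # Handlers follow the (opts, args) -> exit code convention used above.
  print("demo called with args %s" % args)
  return 0

commands = {
  "demo": (_Demo, None, [], "<arg>...", "Example subcommand"),
}

def _GenericMainSketch(cmds):
  # Look up the subcommand and invoke its handler; option parsing omitted.
  if len(sys.argv) < 2 or sys.argv[1] not in cmds:
    sys.stderr.write("Usage: %s <command>\n" % sys.argv[0])
    return 1
  handler = cmds[sys.argv[1]][0]
  return handler(None, sys.argv[2:])

if __name__ == "__main__":
  sys.exit(_GenericMainSketch(commands))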
diff --git a/lib/client/gnt_storage.py b/lib/client/gnt_storage.py
new file mode 100644 (file)
index 0000000..2ada46b
--- /dev/null
@@ -0,0 +1,197 @@
+#
+#
+
+# Copyright (C) 2012 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+"""External Storage related commands"""
+
+# pylint: disable=W0401,W0613,W0614,C0103
+# W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
+# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-storage
+
+from ganeti.cli import *
+from ganeti import constants
+from ganeti import opcodes
+from ganeti import utils
+
+
+def ShowExtStorageInfo(opts, args):
+  """List detailed information about ExtStorage providers.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: empty list or list of ExtStorage providers' names
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  op = opcodes.OpExtStorageDiagnose(output_fields=["name", "nodegroup_status",
+                                                   "parameters"],
+                                    names=[])
+
+  result = SubmitOpCode(op, opts=opts)
+
+  if not result:
+    ToStderr("Can't get the ExtStorage providers list")
+    return 1
+
+  do_filter = bool(args)
+
+  for (name, nodegroup_data, parameters) in result:
+    if do_filter:
+      if name not in args:
+        continue
+      else:
+        args.remove(name)
+
+    nodegroups_valid = []
+    for nodegroup_name, nodegroup_status in nodegroup_data.iteritems():
+      if nodegroup_status:
+        nodegroups_valid.append(nodegroup_name)
+
+    ToStdout("%s:", name)
+
+    if nodegroups_valid:
+      ToStdout("  - Valid for nodegroups:")
+      for ndgrp in utils.NiceSort(nodegroups_valid):
+        ToStdout("      %s", ndgrp)
+      ToStdout("  - Supported parameters:")
+      for pname, pdesc in parameters:
+        ToStdout("      %s: %s", pname, pdesc)
+    else:
+      ToStdout("  - Invalid for all nodegroups")
+
+    ToStdout("")
+
+  if args:
+    for name in args:
+      ToStdout("%s: Not Found", name)
+      ToStdout("")
+
+  return 0
+
+
+def _ExtStorageStatus(status, diagnose):
+  """Beautifier function for ExtStorage status.
+
+  @type status: boolean
+  @param status: is the ExtStorage provider valid
+  @type diagnose: string
+  @param diagnose: the error message for invalid ExtStorages
+  @rtype: string
+  @return: a formatted status
+
+  """
+  if status:
+    return "valid"
+  else:
+    return "invalid - %s" % diagnose
+
+
+def DiagnoseExtStorage(opts, args):
+  """Analyse all ExtStorage providers.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: should be an empty list
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  op = opcodes.OpExtStorageDiagnose(output_fields=["name", "node_status",
+                                                   "nodegroup_status"],
+                                    names=[])
+
+  result = SubmitOpCode(op, opts=opts)
+
+  if not result:
+    ToStderr("Can't get the list of ExtStorage providers")
+    return 1
+
+  for provider_name, node_data, nodegroup_data in result:
+
+    nodes_valid = {}
+    nodes_bad = {}
+    nodegroups_valid = {}
+    nodegroups_bad = {}
+
+    # Per node diagnose
+    for node_name, node_info in node_data.iteritems():
+      if node_info: # at least one entry in the per-node list
+        (fo_path, fo_status, fo_msg, fo_params) = node_info.pop(0)
+        fo_msg = "%s (path: %s)" % (_ExtStorageStatus(fo_status, fo_msg),
+                                    fo_path)
+        if fo_params:
+          fo_msg += (" [parameters: %s]" %
+                     utils.CommaJoin([v[0] for v in fo_params]))
+        else:
+          fo_msg += " [no parameters]"
+        if fo_status:
+          nodes_valid[node_name] = fo_msg
+        else:
+          nodes_bad[node_name] = fo_msg
+      else:
+        nodes_bad[node_name] = "ExtStorage provider not found"
+
+    # Per nodegroup diagnose
+    for nodegroup_name, nodegroup_status in nodegroup_data.iteritems():
+      status = nodegroup_status
+      if status:
+        nodegroups_valid[nodegroup_name] = "valid"
+      else:
+        nodegroups_bad[nodegroup_name] = "invalid"
+
+    def _OutputPerNodegroupStatus(msg_map):
+      map_k = utils.NiceSort(msg_map.keys())
+      for nodegroup in map_k:
+        ToStdout("  For nodegroup: %s --> %s", nodegroup,
+                 msg_map[nodegroup])
+
+    def _OutputPerNodeStatus(msg_map):
+      map_k = utils.NiceSort(msg_map.keys())
+      for node_name in map_k:
+        ToStdout("  Node: %s, status: %s", node_name, msg_map[node_name])
+
+    # Print the output
+    st_msg = "Provider: %s"  % provider_name
+    ToStdout(st_msg)
+    ToStdout("---")
+    _OutputPerNodeStatus(nodes_valid)
+    _OutputPerNodeStatus(nodes_bad)
+    ToStdout("  --")
+    _OutputPerNodegroupStatus(nodegroups_valid)
+    _OutputPerNodegroupStatus(nodegroups_bad)
+    ToStdout("")
+
+  return 0
+
+
+commands = {
+  "diagnose": (
+    DiagnoseExtStorage, ARGS_NONE, [PRIORITY_OPT],
+    "", "Diagnose all ExtStorage providers"),
+  "info": (
+    ShowExtStorageInfo, [ArgOs()], [PRIORITY_OPT],
+    "", "Show info about ExtStorage providers"),
+  }
+
+
+def Main():
+  return GenericMain(commands)
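To make the data flow in DiagnoseExtStorage above concrete, here is a standalone re-run of its per-node classification on hand-made sample data (field layout as in the function; paths and names invented):

node_data = {
  "node1": [("/srv/ganeti/extstorage/prov", True, "", [("vol", "volume")])],
  "node2": [("/srv/ganeti/extstorage/prov", False, "missing file", [])],
  "node3": [],  # provider not found on this node
}

nodes_valid = {}
nodes_bad = {}
for node_name, node_info in sorted(node_data.items()):
  if node_info:
    (path, status, msg, _) = node_info[0]
    if status:
      nodes_valid[node_name] = "valid (path: %s)" % path
    else:
      nodes_bad[node_name] = "invalid - %s (path: %s)" % (msg, path)
  else:
    nodes_bad[node_name] = "ExtStorage provider not found"

print(sorted(nodes_valid.items()))
print(sorted(nodes_bad.items()))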
index f8f061e..d01cd38 100644 (file)
@@ -40,6 +40,7 @@ import tempfile
 import shutil
 import itertools
 import operator
+import ipaddr
 
 from ganeti import ssh
 from ganeti import utils
@@ -60,6 +61,7 @@ from ganeti import opcodes
 from ganeti import ht
 from ganeti import rpc
 from ganeti import runtime
+from ganeti import network
 
 import ganeti.masterd.instance # pylint: disable=W0611
 
@@ -1322,6 +1324,42 @@ def _ExpandInstanceName(cfg, name):
   """Wrapper over L{_ExpandItemName} for instance."""
   return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
 
+
+def _BuildNetworkHookEnv(name, network, gateway, network6, gateway6,
+                         network_type, mac_prefix, tags):
+  """Builds network related env variables for hooks.
+
+  """
+  env = {}
+  if name:
+    env["NETWORK_NAME"] = name
+  if network:
+    env["NETWORK_SUBNET"] = network
+  if gateway:
+    env["NETWORK_GATEWAY"] = gateway
+  if network6:
+    env["NETWORK_SUBNET6"] = network6
+  if gateway6:
+    env["NETWORK_GATEWAY6"] = gateway6
+  if mac_prefix:
+    env["NETWORK_MAC_PREFIX"] = mac_prefix
+  if network_type:
+    env["NETWORK_TYPE"] = network_type
+  if tags:
+    env["NETWORK_TAGS"] = " ".join(tags)
+
+  return env
+
+
+def _BuildNetworkHookEnvByObject(lu, network):
+  args = {
+    "name": network.name,
+    "network": network.network,
+    "gateway": network.gateway,
+    "network6": network.network6,
+    "gateway6": network.gateway6,
+    "network_type": network.network_type,
+    "mac_prefix": network.mac_prefix,
+    "tags" : network.tags,
+  }
+  return _BuildNetworkHookEnv(**args)
+
 
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                           minmem, maxmem, vcpus, nics, disk_template, disks,
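For illustration, and assuming the _BuildNetworkHookEnv definition added above: only the non-empty fields of a (hypothetical) network end up in the hook environment.

env = _BuildNetworkHookEnv(name="mynet", network="192.0.2.0/24",
                           gateway="192.0.2.1", network6=None, gateway6=None,
                           network_type=None, mac_prefix="aa:00:00",
                           tags=["prod"])
assert env == {
  "NETWORK_NAME": "mynet",
  "NETWORK_SUBNET": "192.0.2.0/24",
  "NETWORK_GATEWAY": "192.0.2.1",
  "NETWORK_MAC_PREFIX": "aa:00:00",
  "NETWORK_TAGS": "prod",
}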
@@ -1347,7 +1385,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @type vcpus: string
   @param vcpus: the count of VCPUs the instance has
   @type nics: list
-  @param nics: list of tuples (ip, mac, mode, link) representing
+  @param nics: list of tuples (ip, mac, mode, link, network) representing
       the NICs the instance has
   @type disk_template: string
   @param disk_template: the disk template of the instance
@@ -1382,13 +1420,31 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   }
   if nics:
     nic_count = len(nics)
-    for idx, (ip, mac, mode, link) in enumerate(nics):
+    for idx, (ip, mac, mode, link, network, netinfo) in enumerate(nics):
       if ip is None:
         ip = ""
       env["INSTANCE_NIC%d_IP" % idx] = ip
       env["INSTANCE_NIC%d_MAC" % idx] = mac
       env["INSTANCE_NIC%d_MODE" % idx] = mode
       env["INSTANCE_NIC%d_LINK" % idx] = link
+      if network:
+        env["INSTANCE_NIC%d_NETWORK" % idx] = network
+        if netinfo:
+          nobj = objects.Network.FromDict(netinfo)
+          if nobj.network:
+            env["INSTANCE_NIC%d_NETWORK_SUBNET" % idx] = nobj.network
+          if nobj.gateway:
+            env["INSTANCE_NIC%d_NETWORK_GATEWAY" % idx] = nobj.gateway
+          if nobj.network6:
+            env["INSTANCE_NIC%d_NETWORK_SUBNET6" % idx] = nobj.network6
+          if nobj.gateway6:
+            env["INSTANCE_NIC%d_NETWORK_GATEWAY6" % idx] = nobj.gateway6
+          if nobj.mac_prefix:
+            env["INSTANCE_NIC%d_NETWORK_MAC_PREFIX" % idx] = nobj.mac_prefix
+          if nobj.network_type:
+            env["INSTANCE_NIC%d_NETWORK_TYPE" % idx] = nobj.network_type
+          if nobj.tags:
+            env["INSTANCE_NIC%d_NETWORK_TAGS" % idx] = " ".join(nobj.tags)
       if mode == constants.NIC_MODE_BRIDGED:
         env["INSTANCE_NIC%d_BRIDGE" % idx] = link
   else:
@@ -1417,6 +1473,29 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   return env
 
+
+def _NICToTuple(lu, nic):
+  """Build a tuple of NIC information.
+
+  @type lu:  L{LogicalUnit}
+  @param lu: the logical unit on whose behalf we execute
+  @type nic: L{objects.NIC}
+  @param nic: nic to convert to hooks tuple
+
+  """
+  cluster = lu.cfg.GetClusterInfo()
+  ip = nic.ip
+  mac = nic.mac
+  filled_params = cluster.SimpleFillNIC(nic.nicparams)
+  mode = filled_params[constants.NIC_MODE]
+  link = filled_params[constants.NIC_LINK]
+  network = nic.network
+  netinfo = None
+  if network:
+    net_uuid = lu.cfg.LookupNetwork(network)
+    if net_uuid:
+      nobj = lu.cfg.GetNetwork(net_uuid)
+      netinfo = objects.Network.ToDict(nobj)
+  return (ip, mac, mode, link, network, netinfo)
+
 
 def _NICListToTuple(lu, nics):
   """Build a list of nic information tuples.
@@ -1433,15 +1512,9 @@ def _NICListToTuple(lu, nics):
   hooks_nics = []
   cluster = lu.cfg.GetClusterInfo()
   for nic in nics:
-    ip = nic.ip
-    mac = nic.mac
-    filled_params = cluster.SimpleFillNIC(nic.nicparams)
-    mode = filled_params[constants.NIC_MODE]
-    link = filled_params[constants.NIC_LINK]
-    hooks_nics.append((ip, mac, mode, link))
+    hooks_nics.append(_NICToTuple(lu, nic))
   return hooks_nics
 
 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   """Builds instance related env variables for hooks from an object.
 
@@ -4941,6 +5014,159 @@ class LUOsDiagnose(NoHooksLU):
     return self.oq.OldStyleQuery(self)
 
 
+class _ExtStorageQuery(_QueryBase):
+  FIELDS = query.EXTSTORAGE_FIELDS
+
+  def ExpandNames(self, lu):
+    # Lock all nodes in shared mode
+    # Temporary removal of locks, should be reverted later
+    # TODO: reintroduce locks when they are lighter-weight
+    lu.needed_locks = {}
+    #self.share_locks[locking.LEVEL_NODE] = 1
+    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+    # The following variables interact with _QueryBase._GetNames
+    if self.names:
+      self.wanted = self.names
+    else:
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = self.use_locking
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  @staticmethod
+  def _DiagnoseByProvider(rlist):
+    """Remaps a per-node return list into an a per-provider per-node dictionary
+
+    @param rlist: a map with node names as keys and ExtStorage objects as values
+
+    @rtype: dict
+    @return: a dictionary with extstorage providers as keys and as
+        value another map, with nodes as keys and tuples of
+        (path, status, diagnose, parameters) as values, eg::
+
+          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
+                         "node2": [(/srv/..., False, "missing file")]
+                         "node3": [(/srv/..., True, "", [])]
+          }
+
+    """
+    all_es = {}
+    # we build here the list of nodes that didn't fail the RPC (at RPC
+    # level), so that nodes with a non-responding node daemon don't
+    # make all providers invalid
+    good_nodes = [node_name for node_name in rlist
+                  if not rlist[node_name].fail_msg]
+    for node_name, nr in rlist.items():
+      if nr.fail_msg or not nr.payload:
+        continue
+      for (name, path, status, diagnose, params) in nr.payload:
+        if name not in all_es:
+          # build a list of nodes for this provider containing empty lists
+          # for each node in node_list
+          all_es[name] = {}
+          for nname in good_nodes:
+            all_es[name][nname] = []
+        # convert params from [name, help] to (name, help)
+        params = [tuple(v) for v in params]
+        all_es[name][node_name].append((path, status, diagnose, params))
+    return all_es
+
+  def _GetQueryData(self, lu):
+    """Computes the list of nodes and their attributes.
+
+    """
+    # Locking is not used
+    assert not (compat.any(lu.glm.is_owned(level)
+                           for level in locking.LEVELS
+                           if level != locking.LEVEL_CLUSTER) or
+                self.do_locking or self.use_locking)
+
+    valid_nodes = [node.name
+                   for node in lu.cfg.GetAllNodesInfo().values()
+                   if not node.offline and node.vm_capable]
+    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
+
+    data = {}
+
+    nodegroup_list = lu.cfg.GetNodeGroupList()
+
+    for (es_name, es_data) in pol.items():
+      # For every provider compute the nodegroup validity.
+      # To do this we need to check the validity of each node in es_data
+      # and then construct the corresponding nodegroup dict:
+      #      { nodegroup1: status
+      #        nodegroup2: status
+      #      }
+      ndgrp_data = {}
+      for nodegroup in nodegroup_list:
+        ndgrp = lu.cfg.GetNodeGroup(nodegroup)
+
+        nodegroup_nodes = ndgrp.members
+        nodegroup_name = ndgrp.name
+        node_statuses = []
+
+        for node in nodegroup_nodes:
+          if node in valid_nodes:
+            if es_data[node]:
+              node_status = es_data[node][0][1]
+              node_statuses.append(node_status)
+            else:
+              node_statuses.append(False)
+
+        if False in node_statuses:
+          ndgrp_data[nodegroup_name] = False
+        else:
+          ndgrp_data[nodegroup_name] = True
+
+      # Compute the provider's parameters
+      parameters = set()
+      for idx, esl in enumerate(es_data.values()):
+        valid = bool(esl and esl[0][1])
+        if not valid:
+          break
+
+        node_params = esl[0][3]
+        if idx == 0:
+          # First entry
+          parameters.update(node_params)
+        else:
+          # Filter out inconsistent values
+          parameters.intersection_update(node_params)
+
+      params = list(parameters)
+
+      # Now fill all the info for this provider
+      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
+                                  nodegroup_status=ndgrp_data,
+                                  parameters=params)
+
+      data[es_name] = info
+
+    # Prepare data in requested order
+    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+            if name in data]
+
+
+class LUExtStorageDiagnose(NoHooksLU):
+  """Logical unit for ExtStorage diagnose/query.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                               self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.eq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.eq.OldStyleQuery(self)
+
+
 class LUNodeRemove(LogicalUnit):
   """Logical unit for removing a node.
 
@@ -6339,7 +6565,7 @@ class LUInstanceActivateDisks(NoHooksLU):
 
 
 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
-                           ignore_size=False):
+                           ignore_size=False, check=True):
   """Prepare the block devices for an instance.
 
   This sets up the block devices on all nodes.
@@ -6365,7 +6591,8 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
   device_info = []
   disks_ok = True
   iname = instance.name
-  disks = _ExpandCheckDisks(instance, disks)
+  if check:
+    disks = _ExpandCheckDisks(instance, disks)
 
   # With the two passes mechanism we try to reduce the window of
   # opportunity for the race condition of switching DRBD to primary
@@ -7069,6 +7296,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # TODO: Implement support changing VG while recreating
     constants.IDISK_VG,
     constants.IDISK_METAVG,
+    constants.IDISK_PROVIDER,
     ]))
 
   def CheckArguments(self):
@@ -8500,9 +8728,9 @@ class TLMigrateInstance(Tasklet):
       self._GoReconnect(False)
       self._WaitUntilSync()
 
-    # If the instance's disk template is `rbd' and there was a successful
-    # migration, unmap the device from the source node.
-    if self.instance.disk_template == constants.DT_RBD:
+    # If the instance's disk template is `rbd' or `ext' and there was a
+    # successful migration, unmap the device from the source node.
+    if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
       disks = _ExpandCheckDisks(instance, instance.disks)
       self.feedback_fn("* unmapping instance's disks from %s" % source_node)
       for disk in disks:
@@ -8723,6 +8951,26 @@ def _GenerateUniqueNames(lu, exts):
     results.append("%s%s" % (new_id, val))
   return results
 
+
+def _GetPCIInfo(lu, dev_type):
+  """Return the next free device index and PCI slot, if hotplug is enabled.
+
+  """
+  if lu.op.hotplug:
+    # case of InstanceCreate()
+    if hasattr(lu, "hotplug_info"):
+      if lu.hotplug_info is not None:
+        idx = getattr(lu.hotplug_info, dev_type)
+        setattr(lu.hotplug_info, dev_type, idx + 1)
+        pci = lu.hotplug_info.pci_pool.pop()
+        lu.LogInfo("Choosing pci slot %d" % pci)
+        return idx, pci
+    # case of InstanceSetParams()
+    elif lu.instance.hotplug_info is not None:
+      idx, pci = lu.cfg.GetPCIInfo(lu.instance.name, dev_type)
+      lu.LogInfo("Choosing pci slot %d" % pci)
+      return idx, pci
+
+    lu.LogWarning("Hotplug not supported for this instance.")
+  return None, None
+
 
 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
                          iv_name, p_minor, s_minor):
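The hotplug bookkeeping above can be pictured with a toy model: each device kind keeps a running index on the instance's hotplug_info, and PCI slots come from a per-instance pool (slots 16-31, matching the pool created in LUInstanceCreate further down). A minimal sketch:

class HotplugInfo(object):
  def __init__(self, disks, nics, pci_pool):
    self.disks = disks
    self.nics = nics
    self.pci_pool = pci_pool

info = HotplugInfo(disks=0, nics=0, pci_pool=list(range(16, 32)))

# what _GetPCIInfo does for dev_type == "disks":
idx = getattr(info, "disks")
setattr(info, "disks", idx + 1)
pci = info.pci_pool.pop()
print((idx, pci))  # (0, 31): first disk gets index 0 and the last free slot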
@@ -8739,7 +8987,10 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
   dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
                           logical_id=(vgnames[1], names[1]),
                           params={})
-  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
+
+  disk_idx, pci = _GetPCIInfo(lu, "disks")
+  drbd_dev = objects.Disk(idx=disk_idx, pci=pci,
+                          dev_type=constants.LD_DRBD8, size=size,
                           logical_id=(primary, secondary, port,
                                       p_minor, s_minor,
                                       shared_secret),
@@ -8751,6 +9002,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
 _DISK_TEMPLATE_NAME_PREFIX = {
   constants.DT_PLAIN: "",
   constants.DT_RBD: ".rbd",
+  constants.DT_EXT: ".ext",
   }
 
 
@@ -8760,6 +9012,7 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
   constants.DT_SHARED_FILE: constants.LD_FILE,
   constants.DT_BLOCK: constants.LD_BLOCKDEV,
   constants.DT_RBD: constants.LD_RBD,
+  constants.DT_EXT: constants.LD_EXT,
   }
 
 
@@ -8838,21 +9091,39 @@ def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
                                        disk[constants.IDISK_ADOPT])
     elif template_name == constants.DT_RBD:
       logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+    elif template_name == constants.DT_EXT:
+      def logical_id_fn(idx, _, disk):
+        provider = disk.get(constants.IDISK_PROVIDER, None)
+        if provider is None:
+          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
+                                       " not found", constants.DT_EXT,
+                                       constants.IDISK_PROVIDER)
+        return (provider, names[idx])
     else:
       raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
 
     dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
 
     for idx, disk in enumerate(disk_info):
+      params = {}
+      # Only for the Ext template add disk_info to params
+      if template_name == constants.DT_EXT:
+        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
+        for key in disk:
+          if key not in constants.IDISK_PARAMS:
+            params[key] = disk[key]
       disk_index = idx + base_index
       size = disk[constants.IDISK_SIZE]
       feedback_fn("* disk %s, size %s" %
                   (disk_index, utils.FormatUnit(size, "h")))
+
+      disk_idx, pci = _GetPCIInfo(lu, "disks")
+
       disks.append(objects.Disk(dev_type=dev_type, size=size,
                                 logical_id=logical_id_fn(idx, disk_index, disk),
                                 iv_name="disk/%d" % disk_index,
                                 mode=disk[constants.IDISK_MODE],
-                                params={}))
+                                params=params, idx=disk_idx, pci=pci))
 
   return disks
 
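In other words, for the new 'ext' template every key outside the standard IDISK_* set is forwarded verbatim to the provider. A standalone sketch (the real contents of constants.IDISK_PARAMS are assumed here, and all disk values are invented):

IDISK_PARAMS = frozenset(["size", "mode", "vg", "metavg", "adopt", "provider"])

disk = {"size": 1024, "mode": "rw", "provider": "pvdr1", "param1": "val1"}

params = {"provider": disk["provider"]}
for key in disk:
  if key not in IDISK_PARAMS:
    params[key] = disk[key]  # extra keys become ext-params

print(sorted(params.items()))
# [('param1', 'val1'), ('provider', 'pvdr1')]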
@@ -9108,6 +9379,7 @@ def _ComputeDiskSize(disk_template, disks):
     constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
     constants.DT_BLOCK: 0,
     constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
+    constants.DT_EXT: sum(d[constants.IDISK_SIZE] for d in disks),
   }
 
   if disk_template not in req_size_dict:
@@ -9225,7 +9497,8 @@ class LUInstanceCreate(LogicalUnit):
     # check disks. parameter names and consistent adopt/no-adopt strategy
     has_adopt = has_no_adopt = False
     for disk in self.op.disks:
-      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
+      if self.op.disk_template != constants.DT_EXT:
+        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
       if constants.IDISK_ADOPT in disk:
         has_adopt = True
       else:
@@ -9416,6 +9689,8 @@ class LUInstanceCreate(LogicalUnit):
     """Run the allocator based on input opcode.
 
     """
+    #TODO Export network to iallocator so that it chooses a pnode
+    #     in a nodegroup that has the desired network connected to
     nics = [n.ToDict() for n in self.nics]
     ial = IAllocator(self.cfg, self.rpc,
                      mode=constants.IALLOCATOR_MODE_ALLOC,
@@ -9741,6 +10016,11 @@ class LUInstanceCreate(LogicalUnit):
     if self.op.identify_defaults:
       self._RevertToDefaults(cluster)
 
+    self.hotplug_info = None
+    if self.op.hotplug:
+      self.LogInfo("Enabling hotplug.")
+      self.hotplug_info = objects.HotplugInfo(disks=0, nics=0,
+                                              pci_pool=list(range(16, 32)))
     # NIC buildup
     self.nics = []
     for idx, nic in enumerate(self.op.nics):
@@ -9749,14 +10029,19 @@ class LUInstanceCreate(LogicalUnit):
       if nic_mode is None or nic_mode == constants.VALUE_AUTO:
         nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
 
-      # in routed mode, for the first nic, the default ip is 'auto'
-      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
-        default_ip_mode = constants.VALUE_AUTO
+      net = nic.get(constants.INIC_NETWORK, None)
+      link = nic.get(constants.NIC_LINK, None)
+      ip = nic.get(constants.INIC_IP, None)
+
+      if net is None or net.lower() == constants.VALUE_NONE:
+        net = None
       else:
-        default_ip_mode = constants.VALUE_NONE
+        if nic_mode_req is not None or link is not None:
+          raise errors.OpPrereqError("If network is given, no mode or link"
+                                     " is allowed to be passed",
+                                     errors.ECODE_INVAL)
 
       # ip validity checks
-      ip = nic.get(constants.INIC_IP, default_ip_mode)
       if ip is None or ip.lower() == constants.VALUE_NONE:
         nic_ip = None
       elif ip.lower() == constants.VALUE_AUTO:
@@ -9766,9 +10051,18 @@ class LUInstanceCreate(LogicalUnit):
                                      errors.ECODE_INVAL)
         nic_ip = self.hostname1.ip
       else:
-        if not netutils.IPAddress.IsValid(ip):
+        # We defer pool operations until later, so that the iallocator has
+        # filled in the instance's node(s)
+        if ip.lower() == constants.NIC_IP_POOL:
+          if net is None:
+            raise errors.OpPrereqError("if ip=pool, parameter network"
+                                       " must be passed too",
+                                       errors.ECODE_INVAL)
+
+        elif not netutils.IPAddress.IsValid(ip):
           raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                      errors.ECODE_INVAL)
+
         nic_ip = ip
 
       # TODO: check the ip address for uniqueness
@@ -9789,9 +10083,6 @@ class LUInstanceCreate(LogicalUnit):
                                      errors.ECODE_NOTUNIQUE)
 
       #  Build nic parameters
-      link = nic.get(constants.INIC_LINK, None)
-      if link == constants.VALUE_AUTO:
-        link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
       nicparams = {}
       if nic_mode_req:
         nicparams[constants.NIC_MODE] = nic_mode
@@ -9800,7 +10091,10 @@ class LUInstanceCreate(LogicalUnit):
 
       check_params = cluster.SimpleFillNIC(nicparams)
       objects.NIC.CheckParameterSyntax(check_params)
-      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
+      nic_idx, pci = _GetPCIInfo(self, "nics")
+      self.nics.append(objects.NIC(idx=nic_idx, pci=pci,
+                                   mac=mac, ip=nic_ip, network=net,
+                                   nicparams=check_params))
 
     # disk checks/pre-build
     default_vg = self.cfg.GetVGName()
@@ -9819,16 +10113,37 @@ class LUInstanceCreate(LogicalUnit):
         raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                    errors.ECODE_INVAL)
 
+      ext_provider = disk.get(constants.IDISK_PROVIDER, None)
+      if ext_provider and self.op.disk_template != constants.DT_EXT:
+        raise errors.OpPrereqError("The '%s' option is only valid for the %s"
+                                   " disk template, not %s" %
+                                   (constants.IDISK_PROVIDER, constants.DT_EXT,
+                                   self.op.disk_template), errors.ECODE_INVAL)
+
       data_vg = disk.get(constants.IDISK_VG, default_vg)
       new_disk = {
         constants.IDISK_SIZE: size,
         constants.IDISK_MODE: mode,
         constants.IDISK_VG: data_vg,
         }
+
       if constants.IDISK_METAVG in disk:
         new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
       if constants.IDISK_ADOPT in disk:
         new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
+
+      # For extstorage, demand the `provider' option and add any
+      # additional parameters (ext-params) to the dict
+      if self.op.disk_template == constants.DT_EXT:
+        if ext_provider:
+          new_disk[constants.IDISK_PROVIDER] = ext_provider
+          for key in disk:
+            if key not in constants.IDISK_PARAMS:
+              new_disk[key] = disk[key]
+        else:
+          raise errors.OpPrereqError("Missing provider for template '%s'" %
+                                     constants.DT_EXT, errors.ECODE_INVAL)
+
       self.disks.append(new_disk)
 
     if self.op.mode == constants.INSTANCE_IMPORT:
@@ -9870,7 +10185,7 @@ class LUInstanceCreate(LogicalUnit):
     # creation job will fail.
     for nic in self.nics:
       if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
+        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
 
     #### allocator run
 
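GenerateMAC now receives the NIC's network so a network-specific mac_prefix can be honoured. A rough standalone illustration of the idea (prefix handling and the fallback value are invented, not the ConfigWriter implementation):

import random

def generate_mac(prefix):
  # prefix like "aa:00:00"; fall back to some default when the NIC is not
  # in a network that defines one
  prefix = prefix or "52:54:00"
  suffix = ":".join("%02x" % random.randint(0, 255) for _ in range(3))
  return "%s:%s" % (prefix, suffix)

print(generate_mac("aa:00:00"))  # e.g. aa:00:00:3f:91:07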
@@ -9903,6 +10218,45 @@ class LUInstanceCreate(LogicalUnit):
 
     self.secondaries = []
 
+    # Fill in any IPs from IP pools. This must happen here, because we need to
+    # know the nic's primary node, as specified by the iallocator
+    for idx, nic in enumerate(self.nics):
+      net = nic.network
+      if net is not None:
+        netparams = self.cfg.GetGroupNetParams(net, self.pnode.name)
+        if netparams is None:
+          raise errors.OpPrereqError("No netparams found for network"
+                                     " %s. Propably not connected to"
+                                     " node's %s nodegroup" %
+                                     (net, self.pnode.name),
+                                     errors.ECODE_INVAL)
+        self.LogInfo("NIC/%d inherits netparams %s" %
+                     (idx, netparams.values()))
+        nic.nicparams = dict(netparams)
+        if nic.ip is not None:
+          filled_params = cluster.SimpleFillNIC(nic.nicparams)
+          if nic.ip.lower() == constants.NIC_IP_POOL:
+            try:
+              nic.ip = self.cfg.GenerateIp(net, self.proc.GetECId())
+            except errors.ReservationError:
+              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
+                                         " from the address pool" % idx,
+                                         errors.ECODE_STATE)
+            self.LogInfo("Chose IP %s from network %s", nic.ip, net)
+          else:
+            try:
+              self.cfg.ReserveIp(net, nic.ip, self.proc.GetECId())
+            except errors.ReservationError:
+              raise errors.OpPrereqError("IP address %s already in use"
+                                         " or does not belong to network %s" %
+                                         (nic.ip, net),
+                                         errors.ECODE_NOTUNIQUE)
+      else:
+        # net is None, ip None or given
+        if self.op.conflicts_check:
+          _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+
     # mirror node verification
     if self.op.disk_template in constants.DTS_INT_MIRROR:
       if self.op.snode == pnode.name:
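The pool semantics relied on above are simple: ip=pool draws any free address, while an explicit IP must be reservable exactly once or the opcode fails. A tiny stand-in for the config-level GenerateIp/ReserveIp calls (ValueError models errors.ReservationError; addresses invented):

class TinyPool(object):
  def __init__(self, addresses):
    self.free = set(addresses)

  def GenerateIp(self):
    # ip=pool: hand out any free address
    if not self.free:
      raise ValueError("address pool exhausted")
    return self.free.pop()

  def ReserveIp(self, ip):
    # explicit IP: must belong to the network and still be free
    if ip not in self.free:
      raise ValueError("%s already in use or not in network" % ip)
    self.free.remove(ip)

pool = TinyPool(["192.0.2.10", "192.0.2.11"])
pool.ReserveIp("192.0.2.10")
print(pool.GenerateIp())  # 192.0.2.11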
@@ -9948,6 +10302,9 @@ class LUInstanceCreate(LogicalUnit):
         # Any function that checks prerequisites can be placed here.
         # Check if there is enough space on the RADOS cluster.
         _CheckRADOSFreeSpace()
+      elif self.op.disk_template == constants.DT_EXT:
+        # FIXME: Function that checks prereqs if needed
+        pass
       else:
         # Check lv size requirements, if not adopting
         req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
@@ -10031,6 +10388,9 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
+    #TODO: _CheckExtParams (remotely)
+    # Check parameters for extstorage
+
     # memory check on primary node
     #TODO(dynmem): use MINMEM for checking
     if self.op.start:
@@ -10084,6 +10444,7 @@ class LUInstanceCreate(LogicalUnit):
                             hvparams=self.op.hvparams,
                             hypervisor=self.op.hypervisor,
                             osparams=self.op.osparams,
+                            hotplug_info=self.hotplug_info,
                             )
 
     if self.op.tags:
@@ -11660,7 +12021,8 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     if instance.disk_template not in (constants.DT_FILE,
                                       constants.DT_SHARED_FILE,
-                                      constants.DT_RBD):
+                                      constants.DT_RBD,
+                                      constants.DT_EXT):
       # TODO: check the free disk space for file, when that feature will be
       # supported
       _CheckNodesFreeDiskPerVG(self, nodenames,
@@ -12049,13 +12411,16 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
         if remove_fn is not None:
           remove_fn(absidx, item, private)
 
+        #TODO: include a hotplugged msg in changes
         changes = [("%s/%s" % (kind, absidx), "remove")]
 
         assert container[absidx] == item
         del container[absidx]
       elif op == constants.DDM_MODIFY:
         if modify_fn is not None:
+          #TODO: include a hotplugged msg in changes
           changes = modify_fn(absidx, item, params, private)
       else:
         raise errors.ProgrammerError("Unhandled operation '%s'" % op)
 
@@ -12129,7 +12494,10 @@ class LUInstanceSetParams(LogicalUnit):
     for (op, _, params) in mods:
       assert ht.TDict(params)
 
-      utils.ForceDictType(params, key_types)
+      # If key_types is an empty dict, we assume we have an 'ext' template
+      # and thus do not ForceDictType
+      if key_types:
+        utils.ForceDictType(params, key_types)
 
       if op == constants.DDM_REMOVE:
         if params:
@@ -12165,9 +12533,18 @@ class LUInstanceSetParams(LogicalUnit):
 
       params[constants.IDISK_SIZE] = size
 
-    elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
-      raise errors.OpPrereqError("Disk size change not possible, use"
-                                 " grow-disk", errors.ECODE_INVAL)
+    elif op == constants.DDM_MODIFY:
+      if constants.IDISK_SIZE in params:
+        raise errors.OpPrereqError("Disk size change not possible, use"
+                                   " grow-disk", errors.ECODE_INVAL)
+      if constants.IDISK_MODE not in params:
+        raise errors.OpPrereqError("Disk 'mode' is the only kind of"
+                                   " modification supported, but missing",
+                                   errors.ECODE_NOENT)
+      if len(params) > 1:
+        raise errors.OpPrereqError("Disk modification doesn't support"
+                                   " additional arbitrary parameters",
+                                   errors.ECODE_INVAL)
 
   @staticmethod
   def _VerifyNicModification(op, params):
@@ -12176,29 +12553,37 @@ class LUInstanceSetParams(LogicalUnit):
     """
     if op in (constants.DDM_ADD, constants.DDM_MODIFY):
       ip = params.get(constants.INIC_IP, None)
-      if ip is None:
-        pass
-      elif ip.lower() == constants.VALUE_NONE:
-        params[constants.INIC_IP] = None
-      elif not netutils.IPAddress.IsValid(ip):
-        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
-                                   errors.ECODE_INVAL)
-
-      bridge = params.get("bridge", None)
-      link = params.get(constants.INIC_LINK, None)
-      if bridge and link:
-        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
-                                   " at the same time", errors.ECODE_INVAL)
-      elif bridge and bridge.lower() == constants.VALUE_NONE:
-        params["bridge"] = None
-      elif link and link.lower() == constants.VALUE_NONE:
-        params[constants.INIC_LINK] = None
+      req_net = params.get(constants.INIC_NETWORK, None)
+      link = params.get(constants.NIC_LINK, None)
+      mode = params.get(constants.NIC_MODE, None)
+      if req_net is not None:
+        if req_net.lower() == constants.VALUE_NONE:
+          params[constants.INIC_NETWORK] = None
+          req_net = None
+        elif link is not None or mode is not None:
+          raise errors.OpPrereqError("If network is given"
+                                     " mode or link should not",
+                                     errors.ECODE_INVAL)
 
       if op == constants.DDM_ADD:
         macaddr = params.get(constants.INIC_MAC, None)
         if macaddr is None:
           params[constants.INIC_MAC] = constants.VALUE_AUTO
 
+      if ip is not None:
+        if ip.lower() == constants.VALUE_NONE:
+          params[constants.INIC_IP] = None
+        else:
+          if ip.lower() == constants.NIC_IP_POOL:
+            if op == constants.DDM_ADD and req_net is None:
+              raise errors.OpPrereqError("If ip=pool, parameter network"
+                                         " cannot be none",
+                                         errors.ECODE_INVAL)
+          else:
+            if not netutils.IPAddress.IsValid(ip):
+              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
+                                         errors.ECODE_INVAL)
+
       if constants.INIC_MAC in params:
         macaddr = params[constants.INIC_MAC]
         if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
@@ -12218,16 +12603,26 @@ class LUInstanceSetParams(LogicalUnit):
     if self.op.hvparams:
       _CheckGlobalHvParams(self.op.hvparams)
 
-    self.op.disks = \
-      self._UpgradeDiskNicMods("disk", self.op.disks,
-        opcodes.OpInstanceSetParams.TestDiskModifications)
+    if self.op.allow_arbit_params:
+      self.op.disks = \
+        self._UpgradeDiskNicMods("disk", self.op.disks,
+          opcodes.OpInstanceSetParams.TestExtDiskModifications)
+    else:
+      self.op.disks = \
+        self._UpgradeDiskNicMods("disk", self.op.disks,
+          opcodes.OpInstanceSetParams.TestDiskModifications)
+
     self.op.nics = \
       self._UpgradeDiskNicMods("NIC", self.op.nics,
         opcodes.OpInstanceSetParams.TestNicModifications)
 
     # Check disk modifications
-    self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
-                    self._VerifyDiskModification)
+    if self.op.allow_arbit_params:
+      self._CheckMods("disk", self.op.disks, {},
+                      self._VerifyDiskModification)
+    else:
+      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
+                      self._VerifyDiskModification)
 
     if self.op.disks and self.op.disk_template is not None:
       raise errors.OpPrereqError("Disk template conversion and other disk"
@@ -12285,10 +12680,10 @@ class LUInstanceSetParams(LogicalUnit):
       nics = []
 
       for nic in self._new_nics:
-        nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
-        mode = nicparams[constants.NIC_MODE]
-        link = nicparams[constants.NIC_LINK]
-        nics.append((nic.ip, nic.mac, mode, link))
+        n = copy.deepcopy(nic)
+        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
+        n.nicparams = nicparams
+        nics.append(_NICToTuple(self, n))
 
       args["nics"] = nics
 
@@ -12307,16 +12702,27 @@ class LUInstanceSetParams(LogicalUnit):
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return (nl, nl)
 
-  def _PrepareNicModification(self, params, private, old_ip, old_params,
-                              cluster, pnode):
+  def _PrepareNicModification(self, params, private, old_ip, old_net,
+                              old_params, cluster, pnode):
     update_params_dict = dict([(key, params[key])
                                for key in constants.NICS_PARAMETERS
                                if key in params])
 
-    if "bridge" in params:
-      update_params_dict[constants.NIC_LINK] = params["bridge"]
+    req_link = update_params_dict.get(constants.NIC_LINK, None)
+    req_mode = update_params_dict.get(constants.NIC_MODE, None)
+
+    new_net = params.get(constants.INIC_NETWORK, old_net)
+    if new_net is not None:
+      netparams = self.cfg.GetGroupNetParams(new_net, pnode)
+      if netparams is None:
+        raise errors.OpPrereqError("No netparams found for the network"
+                                   " %s, propably not connected." % new_net,
+                                   errors.ECODE_INVAL)
+      new_params = dict(netparams)
+    else:
+      new_params = _GetUpdatedParams(old_params, update_params_dict)
 
-    new_params = _GetUpdatedParams(old_params, update_params_dict)
     utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
 
     new_filled_params = cluster.SimpleFillNIC(new_params)
@@ -12347,7 +12753,7 @@ class LUInstanceSetParams(LogicalUnit):
       elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
         # otherwise generate the MAC address
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
       else:
         # or validate/reserve the current one
         try:
@@ -12356,7 +12762,67 @@ class LUInstanceSetParams(LogicalUnit):
           raise errors.OpPrereqError("MAC address '%s' already in use"
                                      " in cluster" % mac,
                                      errors.ECODE_NOTUNIQUE)
+    elif new_net != old_net:
+      def get_net_prefix(net):
+        if net:
+          uuid = self.cfg.LookupNetwork(net)
+          if uuid:
+            nobj = self.cfg.GetNetwork(uuid)
+            return nobj.mac_prefix
+        return None
+      new_prefix = get_net_prefix(new_net)
+      old_prefix = get_net_prefix(old_net)
+      if old_prefix != new_prefix:
+        params[constants.INIC_MAC] = \
+          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+
+    #if there is a change in nic-network configuration
+    new_ip = params.get(constants.INIC_IP, old_ip)
+    if (new_ip, new_net) != (old_ip, old_net):
+      if new_ip:
+        if new_net:
+          if new_ip.lower() == constants.NIC_IP_POOL:
+            try:
+              new_ip = self.cfg.GenerateIp(new_net, self.proc.GetECId())
+            except errors.ReservationError:
+              raise errors.OpPrereqError("Unable to get a free IP"
+                                        " from the address pool",
+                                         errors.ECODE_STATE)
+            self.LogInfo("Chose IP %s from pool %s", new_ip, new_net)
+            params[constants.INIC_IP] = new_ip
+          elif new_ip != old_ip or new_net != old_net:
+            try:
+              self.LogInfo("Reserving IP %s in pool %s", new_ip, new_net)
+              self.cfg.ReserveIp(new_net, new_ip, self.proc.GetECId())
+            except errors.ReservationError:
+              raise errors.OpPrereqError("IP %s not available in network %s" %
+                                         (new_ip, new_net),
+                                         errors.ECODE_NOTUNIQUE)
+        elif new_ip.lower() == constants.NIC_IP_POOL:
+          raise errors.OpPrereqError("ip=pool, but no network found",
+                                     errors.ECODE_INVAL)
+        else:
+          # new net is None
+          if self.op.conflicts_check:
+            _CheckForConflictingIp(self, new_ip, pnode)
+
+      if old_ip:
+        if old_net:
+          try:
+            self.cfg.ReleaseIp(old_net, old_ip, self.proc.GetECId())
+          except errors.AddressPoolError:
+            logging.warning("Release IP %s not contained in network %s",
+                            old_ip, old_net)
+
+    # there are no changes in (net, ip) tuple
+    elif (old_net is not None and
+          (req_link is not None or req_mode is not None)):
+      raise errors.OpPrereqError("Not allowed to change link or mode of"
+                                 " a NIC that is connected to a network.",
+                                 errors.ECODE_INVAL)
 
+    logging.info("new_params %s", new_params)
+    logging.info("new_filled_params %s", new_filled_params)
     private.params = new_params
     private.filled = new_filled_params
 
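The MAC handling in the network-change branch above reduces to one rule: a MAC is regenerated only when the old and new networks have different mac_prefix values. Restated minimally (prefix values invented):

def needs_new_mac(old_prefix, new_prefix):
  # None means the NIC is, or was, outside any network with a prefix
  return old_prefix != new_prefix

print(needs_new_mac(None, "aa:00:00"))        # True: prefix changes
print(needs_new_mac("aa:00:00", "aa:00:00"))  # False: keep the current MAC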
@@ -12380,6 +12846,31 @@ class LUInstanceSetParams(LogicalUnit):
     # Prepare disk/NIC modifications
     self.diskmod = PrepareContainerMods(self.op.disks, None)
     self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+    logging.info("nicmod %s", self.nicmod)
+
+    # Check the validity of the `provider' parameter
+    if instance.disk_template == constants.DT_EXT:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if mod[0] == constants.DDM_ADD:
+          if ext_provider is None:
+            raise errors.OpPrereqError("Instance template is '%s' and parameter"
+                                       " '%s' missing, during disk add" %
+                                       (constants.DT_EXT,
+                                        constants.IDISK_PROVIDER),
+                                       errors.ECODE_NOENT)
+        elif mod[0] == constants.DDM_MODIFY:
+          if ext_provider:
+            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
+                                       " modification" % constants.IDISK_PROVIDER,
+                                       errors.ECODE_INVAL)
+    else:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if ext_provider is not None:
+          raise errors.OpPrereqError("Parameter '%s' is only valid for instances"
+                                     " of type '%s'" % (constants.IDISK_PROVIDER,
+                                      constants.DT_EXT), errors.ECODE_INVAL)
 
     # OS change
     if self.op.os_name and not self.op.force:
@@ -12594,26 +13085,35 @@ class LUInstanceSetParams(LogicalUnit):
                                  errors.ECODE_INVAL)
 
     def _PrepareNicCreate(_, params, private):
-      self._PrepareNicModification(params, private, None, {}, cluster, pnode)
+      self._PrepareNicModification(params, private, None, None,
+                                   {}, cluster, pnode)
       return (None, None)
 
     def _PrepareNicMod(_, nic, params, private):
-      self._PrepareNicModification(params, private, nic.ip,
+      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                    nic.nicparams, cluster, pnode)
       return None
 
+    def _PrepareNicRemove(_, params, private):
+      ip = params.ip
+      net = params.network
+      if net is not None and ip is not None:
+        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
+
     # Verify NIC changes (operating on copy)
     nics = instance.nics[:]
     ApplyContainerMods("NIC", nics, None, self.nicmod,
-                       _PrepareNicCreate, _PrepareNicMod, None)
+                       _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
     if len(nics) > constants.MAX_NICS:
       raise errors.OpPrereqError("Instance has too many network interfaces"
                                  " (%d), cannot add more" % constants.MAX_NICS,
                                  errors.ECODE_STATE)
 
     # Verify disk changes (operating on a copy)
     disks = instance.disks[:]
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
+    ApplyContainerMods("disk", disks, None, self.diskmod,
+                       None, None, None)
     if len(disks) > constants.MAX_DISKS:
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
@@ -12632,11 +13132,13 @@ class LUInstanceSetParams(LogicalUnit):
       # Operate on copies as this is still in prereq
       nics = [nic.Copy() for nic in instance.nics]
       ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
-                         self._CreateNewNic, self._ApplyNicMods, None)
+                         self._CreateNewNic, self._ApplyNicMods,
+                         self._RemoveNic)
       self._new_nics = nics
     else:
       self._new_nics = None
 
   def _ConvertPlainToDrbd(self, feedback_fn):
     """Converts an instance from plain to drbd.
 
@@ -12784,6 +13286,13 @@ class LUInstanceSetParams(LogicalUnit):
         self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
                         disk.iv_name, disk, node, err)
 
+    if self.op.hotplug and disk.pci:
+      self.LogInfo("Trying to hotplug device.")
+      disk_ok, device_info = _AssembleInstanceDisks(self, self.instance,
+                                                    [disk], check=False)
+      _, _, dev_path = device_info[0]
+      result = self.rpc.call_hot_add_disk(self.instance.primary_node,
+                                          self.instance, disk, dev_path, idx)
     return (disk, [
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
       ])
@@ -12803,6 +13312,20 @@ class LUInstanceSetParams(LogicalUnit):
     """Removes a disk.
 
     """
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    if root.pci and not self.op.hotplug:
+      raise errors.OpPrereqError("Cannot remove a disk that has"
+                                 " been hotplugged"
+                                 " without removing it with hotplug",
+                                 errors.ECODE_INVAL)
+    if self.op.hotplug and root.pci:
+      self.LogInfo("Trying to hotplug device.")
+      self.rpc.call_hot_del_disk(self.instance.primary_node,
+                                 self.instance, root, idx)
+      _ShutdownInstanceDisks(self, self.instance, [root])
+      self.cfg.UpdatePCIInfo(self.instance.name, root.pci)
+
     (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
     for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
       self.cfg.SetDiskID(disk, node)
@@ -12815,42 +13338,78 @@ class LUInstanceSetParams(LogicalUnit):
     if root.dev_type in constants.LDS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
-  @staticmethod
-  def _CreateNewNic(idx, params, private):
+  def _CreateNewNic(self, idx, params, private):
     """Creates data structure for a new network interface.
 
     """
     mac = params[constants.INIC_MAC]
     ip = params.get(constants.INIC_IP, None)
-    nicparams = private.params
-
-    return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
+    network = params.get(constants.INIC_NETWORK, None)
+    #TODO: not private.filled?? can a nic have no nicparams??
+    nicparams = private.filled
+
+    nic = objects.NIC(mac=mac, ip=ip, network=network, nicparams=nicparams)
+
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    #      return changes
+    if self.op.hotplug:
+      nic_idx, pci = _GetPCIInfo(self, "nics")
+      if pci is not None:
+        nic.idx = nic_idx
+        nic.pci = pci
+        result = self.rpc.call_hot_add_nic(self.instance.primary_node,
+                                           self.instance, nic, idx)
+    desc = [
       ("nic.%d" % idx,
-       "add:mac=%s,ip=%s,mode=%s,link=%s" %
+       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
        (mac, ip, private.filled[constants.NIC_MODE],
-       private.filled[constants.NIC_LINK])),
-      ])
+       private.filled[constants.NIC_LINK],
+       network)),
+      ]
+    return (nic, desc)
 
-  @staticmethod
-  def _ApplyNicMods(idx, nic, params, private):
+  def _ApplyNicMods(self, idx, nic, params, private):
     """Modifies a network interface.
 
     """
     changes = []
 
-    for key in [constants.INIC_MAC, constants.INIC_IP]:
+    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
       if key in params:
         changes.append(("nic.%s/%d" % (key, idx), params[key]))
         setattr(nic, key, params[key])
 
-    if private.params:
-      nic.nicparams = private.params
+    if private.filled:
+      nic.nicparams = private.filled
 
-      for (key, val) in params.items():
+      for (key, val) in nic.nicparams.items():
         changes.append(("nic.%s/%d" % (key, idx), val))
 
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    if self.op.hotplug and nic.pci:
+      self.LogInfo("Trying to hotplug device.")
+      self.rpc.call_hot_del_nic(self.instance.primary_node,
+                                self.instance, nic, idx)
+      result = self.rpc.call_hot_add_nic(self.instance.primary_node,
+                                         self.instance, nic, idx)
     return changes
 
+  def _RemoveNic(self, idx, nic, private):
+    """Removes a network interface.
+
+    """
+    if nic.pci and not self.op.hotplug:
+      raise errors.OpPrereqError("Cannot remove a NIC that has been"
+                                 " hotplugged without using hotplug",
+                                 errors.ECODE_INVAL)
+    #TODO: log warning in case hotplug is not possible
+    #      handle errors
+    if self.op.hotplug and nic.pci:
+      self.LogInfo("Trying to hotplug device.")
+      self.rpc.call_hot_del_nic(self.instance.primary_node,
+                                self.instance, nic, idx)
+      self.cfg.UpdatePCIInfo(self.instance.name, nic.pci)
+
   def Exec(self, feedback_fn):
     """Modifies an instance.
 
@@ -12954,7 +13513,7 @@ class LUInstanceSetParams(LogicalUnit):
       self.cfg.MarkInstanceDown(instance.name)
       result.append(("admin_state", constants.ADMINST_DOWN))
 
-    self.cfg.Update(instance, feedback_fn)
+    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
 
     assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                 self.owned_locks(locking.LEVEL_NODE)), \
@@ -14385,6 +14944,10 @@ class TagsLU(NoHooksLU): # pylint: disable=W0223
       self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
       lock_level = locking.LEVEL_NODEGROUP
       lock_name = self.group_uuid
+    elif self.op.kind == constants.TAG_NETWORK:
+      self.network_uuid = self.cfg.LookupNetwork(self.op.name)
+      lock_level = locking.LEVEL_NETWORK
+      lock_name = self.network_uuid
     else:
       lock_level = None
       lock_name = None
@@ -14407,6 +14970,8 @@ class TagsLU(NoHooksLU): # pylint: disable=W0223
       self.target = self.cfg.GetInstanceInfo(self.op.name)
     elif self.op.kind == constants.TAG_NODEGROUP:
       self.target = self.cfg.GetNodeGroup(self.group_uuid)
+    elif self.op.kind == constants.TAG_NETWORK:
+      self.target = self.cfg.GetNetwork(self.network_uuid)
     else:
       raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                  str(self.op.kind), errors.ECODE_INVAL)
@@ -14938,6 +15503,7 @@ class IAllocator(object):
           "ip": nic.ip,
           "mode": filled_params[constants.NIC_MODE],
           "link": filled_params[constants.NIC_LINK],
+          "network": nic.network,
           }
         if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
           nic_dict["bridge"] = filled_params[constants.NIC_LINK]
@@ -15336,6 +15902,635 @@ class LUTestAllocator(NoHooksLU):
       result = ial.out_text
     return result
 
+
+
+# Network LUs
+class LUNetworkAdd(LogicalUnit):
+  """Logical unit for creating networks.
+
+  """
+  HPATH = "network-add"
+  HTYPE = constants.HTYPE_NETWORK
+  REQ_BGL = False
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    mn = self.cfg.GetMasterNode()
+    return ([mn], [mn])
+
+  def ExpandNames(self):
+    self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
+    self.needed_locks = {}
+    self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the given network name is not already defined.
+
+    """
+    if self.op.network is None:
+      raise errors.OpPrereqError("Network must be given",
+                                 errors.ECODE_INVAL)
+
+    uuid = self.cfg.LookupNetwork(self.op.network_name)
+
+    if uuid:
+      raise errors.OpPrereqError("Network '%s' already defined" %
+                                 self.op.network_name, errors.ECODE_EXISTS)
+
+    if self.op.mac_prefix:
+      utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
+
+    # Check tag validity
+    for tag in self.op.tags:
+      objects.TaggableObject.ValidateTag(tag)
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    args = {
+      "name": self.op.network_name,
+      "network": self.op.network,
+      "gateway": self.op.gateway,
+      "network6": self.op.network6,
+      "gateway6": self.op.gateway6,
+      "mac_prefix": self.op.mac_prefix,
+      "network_type": self.op.network_type,
+      "tags": self.op.tags,
+      }
+    return _BuildNetworkHookEnv(**args)
+
+  def Exec(self, feedback_fn):
+    """Add the ip pool to the cluster.
+
+    """
+    nobj = objects.Network(name=self.op.network_name,
+                           network=self.op.network,
+                           gateway=self.op.gateway,
+                           network6=self.op.network6,
+                           gateway6=self.op.gateway6,
+                           mac_prefix=self.op.mac_prefix,
+                           network_type=self.op.network_type,
+                           uuid=self.network_uuid,
+                           family=4)
+    # Initialize the associated address pool
+    try:
+      pool = network.AddressPool.InitializeNetwork(nobj)
+    except errors.AddressPoolError, e:
+      raise errors.OpExecError("Cannot create IP pool for this network. %s" % e)
+
+    # Check if we need to reserve the nodes and the cluster master IP
+    # These may not be allocated to any instances in routed mode, as
+    # they wouldn't function anyway.
+    for node in self.cfg.GetAllNodesInfo().values():
+      for ip in [node.primary_ip, node.secondary_ip]:
+        try:
+          pool.Reserve(ip)
+          self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
+
+        except errors.AddressPoolError:
+          pass
+
+    master_ip = self.cfg.GetClusterInfo().master_ip
+    try:
+      pool.Reserve(master_ip)
+      self.LogInfo("Reserved cluster master IP (%s)", master_ip)
+    except errors.AddressPoolError:
+      pass
+
+    if self.op.add_reserved_ips:
+      for ip in self.op.add_reserved_ips:
+        try:
+          pool.Reserve(ip, external=True)
+        except errors.AddressPoolError, e:
+          raise errors.OpExecError("Cannot reserve IP %s. %s " % (ip, e))
+
+    if self.op.tags:
+      for tag in self.op.tags:
+        nobj.AddTag(tag)
+
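+    # The UUID was already generated in ExpandNames, so AddNetwork must not
+    # create a new one (check_uuid=False).  Locks added via add_locks are
+    # registered in remove_locks for cleanup on failure; once the network
+    # exists in the configuration its lock has to persist, hence the del.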
+    self.cfg.AddNetwork(nobj, self.proc.GetECId(), check_uuid=False)
+    del self.remove_locks[locking.LEVEL_NETWORK]
+
+
+class LUNetworkRemove(LogicalUnit):
+  HPATH = "network-remove"
+  HTYPE = constants.HTYPE_NETWORK
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
+
+    self.needed_locks = {
+      locking.LEVEL_NETWORK: [self.network_uuid],
+      }
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the given network name exists as a network and that it
+    is not connected to any node group.
+
+    """
+    if not self.network_uuid:
+      raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
+                                 errors.ECODE_INVAL)
+
+    # Verify that the network is not connected to any node group.
+    node_groups = [group.name
+                   for group in self.cfg.GetAllNodeGroupsInfo().values()
+                   for network in group.networks.keys()
+                   if network == self.network_uuid]
+
+    if node_groups:
+      self.LogWarning("Nework '%s' is connected to the following"
+                      " node groups: %s" % (self.op.network_name,
+                      utils.CommaJoin(utils.NiceSort(node_groups))))
+      raise errors.OpPrereqError("Network still connected",
+                                 errors.ECODE_STATE)
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "NETWORK_NAME": self.op.network_name,
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    mn = self.cfg.GetMasterNode()
+    return ([mn], [mn])
+
+  def Exec(self, feedback_fn):
+    """Remove the network.
+
+    """
+    try:
+      self.cfg.RemoveNetwork(self.network_uuid)
+    except errors.ConfigurationError:
+      raise errors.OpExecError("Network '%s' with UUID %s disappeared" %
+                               (self.op.network_name, self.network_uuid))
+
+
+class LUNetworkSetParams(LogicalUnit):
+  """Modifies the parameters of a network.
+
+  """
+  HPATH = "network-modify"
+  HTYPE = constants.HTYPE_NETWORK
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    if (self.op.gateway and
+        (self.op.add_reserved_ips or self.op.remove_reserved_ips)):
+      raise errors.OpPrereqError("Cannot modify gateway and reserved ips"
+                                 " at once", errors.ECODE_INVAL)
+
+  def ExpandNames(self):
+    self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
+    self.network = self.cfg.GetNetwork(self.network_uuid)
+    self.needed_locks = {
+      locking.LEVEL_NETWORK: [self.network_uuid],
+      }
+
+    if self.network is None:
+      raise errors.OpPrereqError("Could not retrieve network '%s' (UUID: %s)" %
+                                 (self.op.network_name, self.network_uuid),
+                                 errors.ECODE_INVAL)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    """
+    self.gateway = self.network.gateway
+    self.network_type = self.network.network_type
+    self.mac_prefix = self.network.mac_prefix
+    self.network6 = self.network.network6
+    self.gateway6 = self.network.gateway6
+    self.tags = self.network.tags
+
+    self.pool = network.AddressPool(self.network)
+
+    if self.op.gateway:
+      if self.op.gateway == constants.VALUE_NONE:
+        self.gateway = None
+      else:
+        self.gateway = self.op.gateway
+        if self.pool.IsReserved(self.gateway):
+          raise errors.OpPrereqError("%s is already reserved" %
+                                     self.gateway, errors.ECODE_INVAL)
+
+    if self.op.network_type:
+      if self.op.network_type == constants.VALUE_NONE:
+        self.network_type = None
+      else:
+        self.network_type = self.op.network_type
+
+    if self.op.mac_prefix:
+      if self.op.mac_prefix == constants.VALUE_NONE:
+        self.mac_prefix = None
+      else:
+        utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
+        self.mac_prefix = self.op.mac_prefix
+
+    if self.op.gateway6:
+      if self.op.gateway6 == constants.VALUE_NONE:
+        self.gateway6 = None
+      else:
+        self.gateway6 = self.op.gateway6
+
+    if self.op.network6:
+      if self.op.network6 == constants.VALUE_NONE:
+        self.network6 = None
+      else:
+        self.network6 = self.op.network6
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    args = {
+      "name": self.op.network_name,
+      "network": self.network.network,
+      "gateway": self.gateway,
+      "network6": self.network6,
+      "gateway6": self.gateway6,
+      "mac_prefix": self.mac_prefix,
+      "network_type": self.network_type,
+      "tags": self.tags,
+      }
+    return _BuildNetworkHookEnv(**args)
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    mn = self.cfg.GetMasterNode()
+    return ([mn], [mn])
+
+  def Exec(self, feedback_fn):
+    """Modifies the network.
+
+    """
+    # TODO: reserve/release via the temporary reservation manager and
+    #       extend cfg.ReserveIp/ReleaseIp with the external flag
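+    # The gateway is kept as an "external" reservation so the pool never
+    # hands it out to an instance; changing the gateway therefore releases
+    # the old external reservation and creates a new one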
+    if self.op.gateway:
+      if self.gateway == self.network.gateway:
+        self.LogWarning("Gateway is already %s" % self.gateway)
+      else:
+        if self.gateway:
+          self.pool.Reserve(self.gateway, external=True)
+        if self.network.gateway:
+          self.pool.Release(self.network.gateway, external=True)
+        self.network.gateway = self.gateway
+
+    if self.op.add_reserved_ips:
+      for ip in self.op.add_reserved_ips:
+        try:
+          if self.pool.IsReserved(ip):
+            self.LogWarning("IP %s is already reserved" % ip)
+          else:
+            self.pool.Reserve(ip, external=True)
+        except errors.AddressPoolError, e:
+          self.LogWarning("Cannot reserve ip %s. %s" % (ip, e))
+
+    if self.op.remove_reserved_ips:
+      for ip in self.op.remove_reserved_ips:
+        if ip == self.network.gateway:
+          self.LogWarning("Cannot unreserve Gateway's IP")
+          continue
+        try:
+          if not self.pool.IsReserved(ip):
+            self.LogWarning("IP %s is already unreserved" % ip)
+          else:
+            self.pool.Release(ip, external=True)
+        except errors.AddressPoolError, e:
+          self.LogWarning("Cannot release ip %s. %s" % (ip, e))
+
+    if self.op.mac_prefix:
+      self.network.mac_prefix = self.mac_prefix
+
+    if self.op.network6:
+      self.network.network6 = self.network6
+
+    if self.op.gateway6:
+      self.network.gateway6 = self.gateway6
+
+    if self.op.network_type:
+      self.network.network_type = self.network_type
+
+    self.pool.Validate()
+
+    self.cfg.Update(self.network, feedback_fn)
+
+
+class _NetworkQuery(_QueryBase):
+  FIELDS = query.NETWORK_FIELDS
+
+  def ExpandNames(self, lu):
+    lu.needed_locks = {}
+
+    self._all_networks = lu.cfg.GetAllNetworksInfo()
+    name_to_uuid = dict((n.name, n.uuid) for n in self._all_networks.values())
+
+    if not self.names:
+      self.wanted = [name_to_uuid[name]
+                     for name in utils.NiceSort(name_to_uuid.keys())]
+    else:
+      # Accept names to be either names or UUIDs.
+      missing = []
+      self.wanted = []
+      all_uuid = frozenset(self._all_networks.keys())
+
+      for name in self.names:
+        if name in all_uuid:
+          self.wanted.append(name)
+        elif name in name_to_uuid:
+          self.wanted.append(name_to_uuid[name])
+        else:
+          missing.append(name)
+
+      if missing:
+        raise errors.OpPrereqError("Some networks do not exist: %s" % missing,
+                                   errors.ECODE_NOENT)
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  def _GetQueryData(self, lu):
+    """Computes the list of networks and their attributes.
+
+    """
+    do_instances = query.NETQ_INST in self.requested_data
+    do_groups = do_instances or (query.NETQ_GROUP in self.requested_data)
+    do_stats = query.NETQ_STATS in self.requested_data
+
+    network_to_groups = None
+    network_to_instances = None
+    stats = None
+
+    # For NETQ_GROUP, we need to map network->[groups]
+    if do_groups:
+      all_groups = lu.cfg.GetAllNodeGroupsInfo()
+      network_to_groups = dict((uuid, []) for uuid in self.wanted)
+
+      if do_instances:
+        all_instances = lu.cfg.GetAllInstancesInfo()
+        all_nodes = lu.cfg.GetAllNodesInfo()
+        network_to_instances = dict((uuid, []) for uuid in self.wanted)
+
+      for group in all_groups.values():
+        if do_instances:
+          group_nodes = [node.name for node in all_nodes.values() if
+                         node.group == group.uuid]
+          group_instances = [instance for instance in all_instances.values()
+                             if instance.primary_node in group_nodes]
+
+        for net_uuid in group.networks.keys():
+          if net_uuid in network_to_groups:
+            netparams = group.networks[net_uuid]
+            mode = netparams[constants.NIC_MODE]
+            link = netparams[constants.NIC_LINK]
+            info = "%s(%s, %s)" % (group.name, mode, link)
+            network_to_groups[net_uuid].append(info)
+
+            if do_instances:
+              for instance in group_instances:
+                for nic in instance.nics:
+                  if nic.network == self._all_networks[net_uuid].name:
+                    network_to_instances[net_uuid].append(instance.name)
+                    break
+
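+    # The pool statistics come straight from the AddressPool object; "map"
+    # is its textual bitmap representation of free/reserved slots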
+    if do_stats:
+      stats = {}
+      for uuid, net in self._all_networks.items():
+        if uuid in self.wanted:
+          pool = network.AddressPool(net)
+          stats[uuid] = {
+            "free_count": pool.GetFreeCount(),
+            "reserved_count": pool.GetReservedCount(),
+            "map": pool.GetMap(),
+            "external_reservations": ", ".join(pool.GetExternalReservations()),
+            }
+
+    return query.NetworkQueryData([self._all_networks[uuid]
+                                   for uuid in self.wanted],
+                                   network_to_groups,
+                                   network_to_instances,
+                                   stats)
+
+
+class LUNetworkQuery(NoHooksLU):
+  """Logical unit for querying networks.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                            self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.nq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.nq.OldStyleQuery(self)
+
+
+
+class LUNetworkConnect(LogicalUnit):
+  """Connect a network to a nodegroup
+
+  """
+  HPATH = "network-connect"
+  HTYPE = constants.HTYPE_NETWORK
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.network_name = self.op.network_name
+    self.group_name = self.op.group_name
+    self.network_mode = self.op.network_mode
+    self.network_link = self.op.network_link
+
+    self.network_uuid = self.cfg.LookupNetwork(self.network_name)
+    self.network = self.cfg.GetNetwork(self.network_uuid)
+    self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
+    self.group = self.cfg.GetNodeGroup(self.group_uuid)
+
+    self.needed_locks = {
+      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_NODEGROUP: [self.group_uuid],
+      }
+    self.share_locks[locking.LEVEL_INSTANCE] = 1
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_INSTANCE:
+      assert not self.needed_locks[locking.LEVEL_INSTANCE]
+
+      # Lock instances optimistically, needs verification once group lock has
+      # been acquired
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+          self.cfg.GetNodeGroupInstances(self.group_uuid)
+
+  def BuildHooksEnv(self):
+    ret = {
+      "GROUP_NAME": self.group_name,
+      "GROUP_NETWORK_MODE": self.network_mode,
+      "GROUP_NETWORK_LINK": self.network_link,
+      }
+    ret.update(_BuildNetworkHookEnvByObject(self, self.network))
+    return ret
+
+  def BuildHooksNodes(self):
+    nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+    return (nodes, nodes)
+
+  def CheckPrereq(self):
+    l = lambda value: ", ".join("%s: %s/%s" % (i[0], i[1], i[2])
+                                for i in value)
+
+    if self.network is None:
+      raise errors.OpPrereqError("Network %s does not exist" %
+                                 self.network_name, errors.ECODE_INVAL)
+
+    self.netparams = {
+      constants.NIC_MODE: self.network_mode,
+      constants.NIC_LINK: self.network_link,
+      }
+    objects.NIC.CheckParameterSyntax(self.netparams)
+
+    #if self.network_mode == constants.NIC_MODE_BRIDGED:
+    #  _CheckNodeGroupBridgesExist(self, self.network_link, self.group_uuid)
+    self.connected = False
+    if self.network_uuid in self.group.networks:
+      self.LogWarning("Network '%s' is already mapped to group '%s'" %
+                      (self.network_name, self.group.name))
+      self.connected = True
+      return
+
+    pool = network.AddressPool(self.network)
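+    # A NIC is conflicting if it belongs to no network yet, but its IP falls
+    # inside this network's range; connecting the network would silently
+    # claim that address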
+    if self.op.conflicts_check:
+      groupinstances = []
+      for n in self.cfg.GetNodeGroupInstances(self.group_uuid):
+        groupinstances.append(self.cfg.GetInstanceInfo(n))
+      instances = [(instance.name, idx, nic.ip)
+                   for instance in groupinstances
+                   for idx, nic in enumerate(instance.nics)
+                   if (not nic.network and pool._Contains(nic.ip))]
+      if instances:
+        self.LogWarning("Following occurences use IPs from network %s"
+                        " that is about to connect to nodegroup %s: %s" %
+                        (self.network_name, self.group.name,
+                        l(instances)))
+        raise errors.OpPrereqError("Conflicting IPs found."
+                                   " Please remove/modify"
+                                   " corresponding NICs",
+                                   errors.ECODE_INVAL)
+
+  def Exec(self, feedback_fn):
+    if self.connected:
+      return
+
+    self.group.networks[self.network_uuid] = self.netparams
+    self.cfg.Update(self.group, feedback_fn)
+
+
+class LUNetworkDisconnect(LogicalUnit):
+  """Disconnect a network to a nodegroup
+
+  """
+  HPATH = "network-disconnect"
+  HTYPE = constants.HTYPE_NETWORK
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.network_name = self.op.network_name
+    self.group_name = self.op.group_name
+
+    self.network_uuid = self.cfg.LookupNetwork(self.network_name)
+    self.network = self.cfg.GetNetwork(self.network_uuid)
+    self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
+    self.group = self.cfg.GetNodeGroup(self.group_uuid)
+
+    self.needed_locks = {
+      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_NODEGROUP: [self.group_uuid],
+      }
+    self.share_locks[locking.LEVEL_INSTANCE] = 1
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_INSTANCE:
+      assert not self.needed_locks[locking.LEVEL_INSTANCE]
+
+      # Lock instances optimistically, needs verification once group lock has
+      # been acquired
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+          self.cfg.GetNodeGroupInstances(self.group_uuid)
+
+  def BuildHooksEnv(self):
+    ret = {
+      "GROUP_NAME": self.group_name,
+      }
+    ret.update(_BuildNetworkHookEnvByObject(self, self.network))
+    return ret
+
+  def BuildHooksNodes(self):
+    nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+    return (nodes, nodes)
+
+  def CheckPrereq(self):
+    l = lambda value: ", ".join("%s: %s/%s" % (i[0], i[1], i[2])
+                                for i in value)
+
+    self.connected = True
+    if self.network_uuid not in self.group.networks:
+      self.LogWarning("Network '%s' is"
+                         " not mapped to group '%s'" %
+                         (self.network_name, self.group.name))
+      self.connected = False
+      return
+
+    if self.op.conflicts_check:
+      groupinstances = []
+      for n in self.cfg.GetNodeGroupInstances(self.group_uuid):
+        groupinstances.append(self.cfg.GetInstanceInfo(n))
+      instances = [(instance.name, idx, nic.ip)
+                   for instance in groupinstances
+                   for idx, nic in enumerate(instance.nics)
+                   if nic.network == self.network_name]
+      if instances:
+        self.LogWarning("Following occurences use IPs from network %s"
+                           " that is about to disconnected from the nodegroup"
+                           " %s: %s" %
+                           (self.network_name, self.group.name,
+                            l(instances)))
+        raise errors.OpPrereqError("Conflicting IPs."
+                                   " Please remove/modify"
+                                   " corresponding NICS",
+                                   errors.ECODE_INVAL)
+
+  def Exec(self, feedback_fn):
+    if not self.connected:
+      return
+
+    del self.group.networks[self.network_uuid]
+    self.cfg.Update(self.group, feedback_fn)
+
 
 #: Query type implementations
 _QUERY_IMPL = {
@@ -15343,7 +16538,9 @@ _QUERY_IMPL = {
   constants.QR_INSTANCE: _InstanceQuery,
   constants.QR_NODE: _NodeQuery,
   constants.QR_GROUP: _GroupQuery,
+  constants.QR_NETWORK: _NetworkQuery,
   constants.QR_OS: _OsQuery,
+  constants.QR_EXTSTORAGE: _ExtStorageQuery,
   constants.QR_EXPORT: _ExportQuery,
   }
 
@@ -15361,3 +16558,20 @@ def _GetQueryImplementation(name):
   except KeyError:
     raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                                errors.ECODE_INVAL)
+
+
+def _CheckForConflictingIp(lu, ip, node):
+  """In case of conflicting ip raise error.
+
+  @type ip: string
+  @param ip: ip address
+  @type node: string
+  @param node: node name
+
+  """
+  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
+  if conf_net is not None:
+    raise errors.OpPrereqError("Conflicting IP found:"
+                               " %s <> %s." % (ip, conf_net),
+                               errors.ECODE_INVAL)
+
+  return (None, None)
index c98caff..d7a6c8a 100644 (file)
@@ -39,6 +39,7 @@ import random
 import logging
 import time
 import itertools
+from functools import wraps
 
 from ganeti import errors
 from ganeti import locking
@@ -50,6 +51,7 @@ from ganeti import serializer
 from ganeti import uidpool
 from ganeti import netutils
 from ganeti import runtime
+from ganeti import network
 
 
 _config_lock = locking.SharedLock("ConfigWriter")
@@ -106,6 +108,13 @@ class TemporaryReservationManager:
       all_reserved.update(holder_reserved)
     return all_reserved
 
+  def GetECReserved(self, ec_id):
+    ec_reserved = set()
+    if ec_id in self._ec_reserved:
+      ec_reserved.update(self._ec_reserved[ec_id])
+    return ec_reserved
+
   def Generate(self, existing, generate_one_fn, ec_id):
     """Generate a new resource of this type
 
@@ -176,8 +185,10 @@ class ConfigWriter:
     self._temporary_macs = TemporaryReservationManager()
     self._temporary_secrets = TemporaryReservationManager()
     self._temporary_lvs = TemporaryReservationManager()
+    self._temporary_ips = TemporaryReservationManager()
     self._all_rms = [self._temporary_ids, self._temporary_macs,
-                     self._temporary_secrets, self._temporary_lvs]
+                     self._temporary_secrets, self._temporary_lvs,
+                     self._temporary_ips]
     # Note: in order to prevent errors when resolving our name in
     # _DistributeConfig, we compute it here once and reuse it; it's
     # better to raise an error before starting to modify the config
@@ -208,16 +219,30 @@ class ConfigWriter:
     """
     return os.path.exists(constants.CLUSTER_CONF_FILE)
 
-  def _GenerateOneMAC(self):
+  def _GenerateMACPrefix(self, net=None):
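+    # Returns a decorator: the decorated function generates the MAC suffix,
+    # while the decorator prepends the proper prefix, i.e. the mac_prefix of
+    # the NIC's network if it defines one, the cluster-wide prefix otherwise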
+    def _get_mac_prefix(view_func):
+      def _decorator(*args, **kwargs):
+        prefix = self._config_data.cluster.mac_prefix
+        if net:
+          net_uuid = self._UnlockedLookupNetwork(net)
+          if net_uuid:
+            nobj = self._UnlockedGetNetwork(net_uuid)
+            if nobj.mac_prefix:
+              prefix = nobj.mac_prefix
+        suffix = view_func(*args, **kwargs)
+        return prefix + ":" + suffix
+      return wraps(view_func)(_decorator)
+    return _get_mac_prefix
+
+  def _GenerateMACSuffix(self):
     """Generate one mac address
 
     """
-    prefix = self._config_data.cluster.mac_prefix
     byte1 = random.randrange(0, 256)
     byte2 = random.randrange(0, 256)
     byte3 = random.randrange(0, 256)
-    mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
-    return mac
+    suffix = "%02x:%02x:%02x" % (byte1, byte2, byte3)
+    return suffix
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetNdParams(self, node):
@@ -266,14 +291,15 @@ class ConfigWriter:
     return self._config_data.cluster.SimpleFillDP(group.diskparams)
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def GenerateMAC(self, ec_id):
+  def GenerateMAC(self, net, ec_id):
     """Generate a MAC for an instance.
 
     This should check the current instances for duplicates.
 
     """
     existing = self._AllMACs()
-    return self._temporary_ids.Generate(existing, self._GenerateOneMAC, ec_id)
+    gen_mac = self._GenerateMACPrefix(net)(self._GenerateMACSuffix)
+    return self._temporary_ids.Generate(existing, gen_mac, ec_id)
 
   @locking.ssynchronized(_config_lock, shared=1)
   def ReserveMAC(self, mac, ec_id):
@@ -289,6 +315,114 @@ class ConfigWriter:
     else:
       self._temporary_macs.Reserve(ec_id, mac)
 
+  def _UnlockedCommitTemporaryIps(self, ec_id):
+    """Commit all reserved IP address to their respective pools
+
+    """
+    for action, address, net_uuid in self._temporary_ips.GetECReserved(ec_id):
+      self._UnlockedCommitIp(action, net_uuid, address)
+
+  def _UnlockedCommitIp(self, action, net_uuid, address):
+    """Commit a reserved IP address to an IP pool.
+
+    Depending on the action, the address is either marked as reserved in,
+    or released from, the network's address pool.
+
+    """
+    nobj = self._UnlockedGetNetwork(net_uuid)
+    pool = network.AddressPool(nobj)
+    if action == 'reserve':
+      pool.Reserve(address)
+    elif action == 'release':
+      pool.Release(address)
+
+  def _UnlockedReleaseIp(self, net_uuid, address, ec_id):
+    """Give a specific IP address back to an IP pool.
+
+    The release is recorded as a temporary reservation and only committed
+    to the network's pool by L{_UnlockedCommitTemporaryIps}.
+
+    """
+    self._temporary_ips.Reserve(ec_id, ('release', address, net_uuid))
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def ReleaseIp(self, net, address, ec_id):
+    """Give a specified IP address back to an IP pool.
+
+    This is just a wrapper around _UnlockedReleaseIp.
+
+    """
+    net_uuid = self._UnlockedLookupNetwork(net)
+    if net_uuid:
+      self._UnlockedReleaseIp(net_uuid, address, ec_id)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GenerateIp(self, net, ec_id):
+    """Find a free IPv4 address for an instance.
+
+    """
+    net_uuid = self._UnlockedLookupNetwork(net)
+    nobj = self._UnlockedGetNetwork(net_uuid)
+    pool = network.AddressPool(nobj)
+    gen_free = pool.GenerateFree()
+
+    def gen_one():
+      try:
+        ip = gen_free()
+      except StopIteration:
+        raise errors.ReservationError("Cannot generate IP. Network is full")
+      return ("reserve", ip, net_uuid)
+
+    _, address, _ = self._temporary_ips.Generate([], gen_one, ec_id)
+    return address
+
+  def _UnlockedReserveIp(self, net_uuid, address, ec_id):
+    """Reserve a given IPv4 address for use by an instance.
+
+    """
+    nobj = self._UnlockedGetNetwork(net_uuid)
+    pool = network.AddressPool(nobj)
+    try:
+      isreserved = pool.IsReserved(address)
+    except errors.AddressPoolError:
+      raise errors.ReservationError("IP address not in network")
+    if isreserved:
+      raise errors.ReservationError("IP address already in use")
+
+    return self._temporary_ips.Reserve(ec_id, ('reserve', address, net_uuid))
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def ReserveIp(self, net, address, ec_id):
+    """Reserve a given IPv4 address for use by an instance.
+
+    """
+    net_uuid = self._UnlockedLookupNetwork(net)
+    if net_uuid:
+      return self._UnlockedReserveIp(net_uuid, address, ec_id)
+
+  # Needs the exclusive config lock, since hotplug_info is modified and the
+  # configuration is written out
+  @locking.ssynchronized(_config_lock)
+  def GetPCIInfo(self, instance_name, dev_type):
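+    """Return a (device index, PCI slot) pair for a device hotplug.
+
+    Bumps the per-dev_type counter in the instance's hotplug_info and pops
+    a free slot from its PCI pool; returns (None, None) if the instance
+    does not support hotplugging.
+
+    """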
+
+    instance = self._UnlockedGetInstanceInfo(instance_name)
+    if not instance.hotplug_info:
+      return (None, None)
+    idx = getattr(instance.hotplug_info, dev_type)
+    setattr(instance.hotplug_info, dev_type, idx + 1)
+    pci = instance.hotplug_info.pci_pool.pop()
+    self._WriteConfig()
+
+    return (idx, pci)
+
+  @locking.ssynchronized(_config_lock)
+  def UpdatePCIInfo(self, instance_name, pci_slot):
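+    """Return a PCI slot to the instance's free pool after hot-removal.
+
+    """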
+
+    instance = self._UnlockedGetInstanceInfo(instance_name)
+    if instance.hotplug_info:
+      instance.hotplug_info.pci_pool.append(pci_slot)
+      self._WriteConfig()
+
   @locking.ssynchronized(_config_lock, shared=1)
   def ReserveLV(self, lv_name, ec_id):
     """Reserve an VG/LV pair for an instance.
@@ -680,7 +814,7 @@ class ConfigWriter:
         else:
           raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)
 
-        _AddIpAddress("%s/%s" % (link, nic.ip),
+        _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
                       "instance:%s/nic:%d" % (instance.name, idx))
 
     for ip, owners in ips.items():
@@ -1262,6 +1396,7 @@ class ConfigWriter:
     self._config_data.instances[instance.name] = instance
     self._config_data.cluster.serial_no += 1
     self._UnlockedReleaseDRBDMinors(instance.name)
+    self._UnlockedCommitTemporaryIps(ec_id)
     self._WriteConfig()
 
   def _EnsureUUID(self, item, ec_id):
@@ -1323,6 +1458,16 @@ class ConfigWriter:
     if network_port is not None:
       self._config_data.cluster.tcpudp_port_pool.add(network_port)
 
+    instance = self._UnlockedGetInstanceInfo(instance_name)
+
+    for nic in instance.nics:
+      if nic.network is not None and nic.ip is not None:
+        net_uuid = self._UnlockedLookupNetwork(nic.network)
+        if net_uuid:
+          # Return all IP addresses to the respective address pools
+          self._UnlockedCommitIp('release', net_uuid, nic.ip)
+
     del self._config_data.instances[instance_name]
     self._config_data.cluster.serial_no += 1
     self._WriteConfig()
@@ -2094,6 +2239,9 @@ class ConfigWriter:
     nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
                   self._config_data.nodegroups.values()]
     nodegroups_data = fn(utils.NiceSort(nodegroups))
+    networks = ["%s %s" % (net.uuid, net.name) for net in
+                self._config_data.networks.values()]
+    networks_data = fn(utils.NiceSort(networks))
 
     ssconf_values = {
       constants.SS_CLUSTER_NAME: cluster.cluster_name,
@@ -2118,6 +2266,7 @@ class ConfigWriter:
       constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
       constants.SS_UID_POOL: uid_pool,
       constants.SS_NODEGROUPS: nodegroups_data,
+      constants.SS_NETWORKS: networks_data,
       }
     bad_values = [(k, v) for k, v in ssconf_values.items()
                   if not isinstance(v, (str, basestring))]
@@ -2191,7 +2340,7 @@ class ConfigWriter:
     return self._config_data.HasAnyDiskOfType(dev_type)
 
   @locking.ssynchronized(_config_lock)
-  def Update(self, target, feedback_fn):
+  def Update(self, target, feedback_fn, ec_id=None):
     """Notify function to be called after updates.
 
     This function must be called when an object (as returned by
@@ -2219,6 +2368,8 @@ class ConfigWriter:
       test = target in self._config_data.instances.values()
     elif isinstance(target, objects.NodeGroup):
       test = target in self._config_data.nodegroups.values()
+    elif isinstance(target, objects.Network):
+      test = target in self._config_data.networks.values()
     else:
       raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                    " ConfigWriter.Update" % type(target))
@@ -2236,6 +2387,10 @@ class ConfigWriter:
     if isinstance(target, objects.Instance):
       self._UnlockedReleaseDRBDMinors(target.name)
 
+    if ec_id is not None:
+      # Commit all IPs reserved by OpInstanceSetParams and OpGroupSetParams
+      self._UnlockedCommitTemporaryIps(ec_id)
+
     self._WriteConfig(feedback_fn=feedback_fn)
 
   @locking.ssynchronized(_config_lock)
@@ -2245,3 +2400,196 @@ class ConfigWriter:
     """
     for rm in self._all_rms:
       rm.DropECReservations(ec_id)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetAllNetworksInfo(self):
+    """Get the configuration of all networks
+
+    """
+    return dict(self._config_data.networks)
+
+  def _UnlockedGetNetworkList(self):
+    """Get the list of networks.
+
+    This function is for internal use, when the config lock is already held.
+
+    """
+    return self._config_data.networks.keys()
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNetworkList(self):
+    """Get the list of networks.
+
+    @return: list of network UUIDs
+
+    """
+    return self._UnlockedGetNetworkList()
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNetworkNames(self):
+    """Get a list of network names
+
+    """
+    names = [network.name
+             for network in self._config_data.networks.values()]
+    return names
+
+  def _UnlockedGetNetwork(self, uuid):
+    """Returns information about a network.
+
+    This function is for internal use, when the config lock is already held.
+
+    """
+    if uuid not in self._config_data.networks:
+      return None
+
+    return self._config_data.networks[uuid]
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNetwork(self, uuid):
+    """Returns information about a network.
+
+    It takes the information from the configuration file.
+
+    @param uuid: UUID of the network
+
+    @rtype: L{objects.Network}
+    @return: the network object
+
+    """
+    return self._UnlockedGetNetwork(uuid)
+
+  @locking.ssynchronized(_config_lock)
+  def AddNetwork(self, net, ec_id, check_uuid=True):
+    """Add a network to the configuration.
+
+    @type net: L{objects.Network}
+    @param net: the Network object to add
+    @type ec_id: string
+    @param ec_id: unique id for the job to use when creating a missing UUID
+
+    """
+    self._UnlockedAddNetwork(net, ec_id, check_uuid)
+    self._WriteConfig()
+
+  def _UnlockedAddNetwork(self, net, ec_id, check_uuid):
+    """Add a network to the configuration.
+
+    """
+    logging.info("Adding network %s to configuration", net.name)
+
+    if check_uuid:
+      self._EnsureUUID(net, ec_id)
+
+    existing_uuid = self._UnlockedLookupNetwork(net.name)
+    if existing_uuid:
+      raise errors.OpPrereqError("Desired network name '%s' already"
+                                 " exists as a network (UUID: %s)" %
+                                 (net.name, existing_uuid),
+                                 errors.ECODE_EXISTS)
+    net.serial_no = 1
+    self._config_data.networks[net.uuid] = net
+    self._config_data.cluster.serial_no += 1
+
+  def _UnlockedLookupNetwork(self, target):
+    """Lookup a network's UUID.
+
+    @type target: string
+    @param target: network name or UUID
+    @rtype: string
+    @return: network UUID, or None if the network cannot be found
+
+    """
+    if target in self._config_data.networks:
+      return target
+    for net in self._config_data.networks.values():
+      if net.name == target:
+        return net.uuid
+    return None
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def LookupNetwork(self, target):
+    """Lookup a network's UUID.
+
+    This function is just a wrapper over L{_UnlockedLookupNetwork}.
+
+    @type target: string
+    @param target: network name or UUID
+    @rtype: string
+    @return: network UUID
+
+    """
+    return self._UnlockedLookupNetwork(target)
+
+  @locking.ssynchronized(_config_lock)
+  def RemoveNetwork(self, network_uuid):
+    """Remove a network from the configuration.
+
+    @type network_uuid: string
+    @param network_uuid: the UUID of the network to remove
+
+    """
+    logging.info("Removing network %s from configuration", network_uuid)
+
+    if network_uuid not in self._config_data.networks:
+      raise errors.ConfigurationError("Unknown network '%s'" % network_uuid)
+
+    del self._config_data.networks[network_uuid]
+    self._config_data.cluster.serial_no += 1
+    self._WriteConfig()
+
+  def _UnlockedGetGroupNetParams(self, net, node):
+    """Get the netparams (mode, link) of a network.
+
+    Get a network's netparams for a given node.
+
+    @type net: string
+    @param net: network name
+    @type node: string
+    @param node: node name
+    @rtype: dict or None
+    @return: netparams
+
+    """
+    net_uuid = self._UnlockedLookupNetwork(net)
+    if net_uuid is None:
+      return None
+
+    node_info = self._UnlockedGetNodeInfo(node)
+    nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
+    netparams = nodegroup_info.networks.get(net_uuid, None)
+
+    return netparams
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetGroupNetParams(self, net, node):
+    """Locking wrapper of _UnlockedGetGroupNetParams()
+
+    """
+    return self._UnlockedGetGroupNetParams(net, node)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def CheckIPInNodeGroup(self, ip, node):
+    """Check for conflictig IP.
+
+    @type ip: string
+    @param ip: ip address
+    @type node: string
+    @param node: node name
+    @rtype: (string, dict) or (None, None)
+    @return: (network name, netparams)
+
+    """
+    if ip is None:
+      return (None, None)
+    node_info = self._UnlockedGetNodeInfo(node)
+    nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
+    for net_uuid in nodegroup_info.networks.keys():
+      net_info = self._UnlockedGetNetwork(net_uuid)
+      pool = network.AddressPool(net_info)
+      if pool._Contains(ip):
+        return (net_info.name, nodegroup_info.networks[net_uuid])
+
+    return (None, None)
index 02aa6f6..63caf1d 100644 (file)
@@ -240,6 +240,7 @@ DAEMONS_LOGFILES = {
   }
 
 LOG_OS_DIR = LOG_DIR + "os"
+LOG_ES_DIR = LOG_DIR + "extstorage"
 LOG_WATCHER = LOG_DIR + "watcher.log"
 LOG_COMMANDS = LOG_DIR + "commands.log"
 LOG_BURNIN = LOG_DIR + "burnin.log"
@@ -261,6 +262,7 @@ SYSLOG_ONLY = "only"
 SYSLOG_SOCKET = "/dev/log"
 
 OS_SEARCH_PATH = _autoconf.OS_SEARCH_PATH
+ES_SEARCH_PATH = _autoconf.ES_SEARCH_PATH
 EXPORT_DIR = _autoconf.EXPORT_DIR
 
 EXPORT_CONF_FILE = "config.ini"
@@ -369,6 +371,7 @@ HTYPE_CLUSTER = "CLUSTER"
 HTYPE_NODE = "NODE"
 HTYPE_GROUP = "GROUP"
 HTYPE_INSTANCE = "INSTANCE"
+HTYPE_NETWORK = "NETWORK"
 
 HKR_SKIP = 0
 HKR_FAIL = 1
@@ -427,19 +430,21 @@ DT_FILE = "file"
 DT_SHARED_FILE = "sharedfile"
 DT_BLOCK = "blockdev"
 DT_RBD = "rbd"
+DT_EXT = "ext"
 
 # the set of network-mirrored disk templates
 DTS_INT_MIRROR = frozenset([DT_DRBD8])
 
 # the set of externally-mirrored disk templates (e.g. SAN, NAS)
-DTS_EXT_MIRROR = frozenset([DT_SHARED_FILE, DT_BLOCK, DT_RBD])
+DTS_EXT_MIRROR = frozenset([DT_SHARED_FILE, DT_BLOCK, DT_RBD, DT_EXT])
 
 # the set of non-lvm-based disk templates
 DTS_NOT_LVM = frozenset([DT_DISKLESS, DT_FILE, DT_SHARED_FILE,
-                         DT_BLOCK, DT_RBD])
+                         DT_BLOCK, DT_RBD, DT_EXT])
 
 # the set of disk templates which can be grown
-DTS_GROWABLE = frozenset([DT_PLAIN, DT_DRBD8, DT_FILE, DT_SHARED_FILE, DT_RBD])
+DTS_GROWABLE = frozenset([DT_PLAIN, DT_DRBD8, DT_FILE, DT_SHARED_FILE,
+                          DT_RBD, DT_EXT])
 
 # the set of disk templates that allow adoption
 DTS_MAY_ADOPT = frozenset([DT_PLAIN, DT_BLOCK])
@@ -459,15 +464,17 @@ LD_DRBD8 = "drbd8"
 LD_FILE = "file"
 LD_BLOCKDEV = "blockdev"
 LD_RBD = "rbd"
+LD_EXT = "ext"
 LOGICAL_DISK_TYPES = frozenset([
   LD_LV,
   LD_DRBD8,
   LD_FILE,
   LD_BLOCKDEV,
   LD_RBD,
+  LD_EXT,
   ])
 
-LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV, LD_RBD])
+LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV, LD_RBD, LD_EXT])
 
 # drbd constants
 DRBD_HMAC_ALG = "md5"
@@ -563,7 +570,8 @@ DISK_TEMPLATES = frozenset([
   DT_FILE,
   DT_SHARED_FILE,
   DT_BLOCK,
-  DT_RBD
+  DT_RBD,
+  DT_EXT,
   ])
 
 FILE_DRIVER = frozenset([FD_LOOP, FD_BLKTAP])
@@ -601,11 +609,13 @@ TAG_CLUSTER = "cluster"
 TAG_NODEGROUP = "nodegroup"
 TAG_NODE = "node"
 TAG_INSTANCE = "instance"
+TAG_NETWORK = "network"
 VALID_TAG_TYPES = frozenset([
   TAG_CLUSTER,
   TAG_NODEGROUP,
   TAG_NODE,
   TAG_INSTANCE,
+  TAG_NETWORK,
   ])
 MAX_TAG_LEN = 128
 MAX_TAGS_PER_OBJ = 4096
@@ -674,6 +684,31 @@ OS_PARAMETERS_FILE = "parameters.list"
 OS_VALIDATE_PARAMETERS = "parameters"
 OS_VALIDATE_CALLS = frozenset([OS_VALIDATE_PARAMETERS])
 
+# External Storage (ES) related constants
+ES_ACTION_CREATE = "create"
+ES_ACTION_REMOVE = "remove"
+ES_ACTION_GROW = "grow"
+ES_ACTION_ATTACH = "attach"
+ES_ACTION_DETACH = "detach"
+ES_ACTION_VERIFY = "verify"
+
+ES_SCRIPT_CREATE = ES_ACTION_CREATE
+ES_SCRIPT_REMOVE = ES_ACTION_REMOVE
+ES_SCRIPT_GROW = ES_ACTION_GROW
+ES_SCRIPT_ATTACH = ES_ACTION_ATTACH
+ES_SCRIPT_DETACH = ES_ACTION_DETACH
+ES_SCRIPT_VERIFY = ES_ACTION_VERIFY
+ES_SCRIPTS = frozenset([
+  ES_SCRIPT_CREATE,
+  ES_SCRIPT_REMOVE,
+  ES_SCRIPT_GROW,
+  ES_SCRIPT_ATTACH,
+  ES_SCRIPT_DETACH,
+  ES_SCRIPT_VERIFY,
+  ])
+
+ES_PARAMETERS_FILE = "parameters.list"
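+
+# An ExtStorage provider is expected to be a directory under one of the
+# ES_SEARCH_PATH entries, containing one executable per entry in ES_SCRIPTS
+# and, optionally, an ES_PARAMETERS_FILE describing its parameters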
+
 # ssh constants
 SSH_CONFIG_DIR = _autoconf.SSH_CONFIG_DIR
 SSH_HOST_DSA_PRIV = SSH_CONFIG_DIR + "/ssh_host_dsa_key"
@@ -1089,9 +1124,17 @@ NIC_LINK = "link"
 
 NIC_MODE_BRIDGED = "bridged"
 NIC_MODE_ROUTED = "routed"
+NIC_IP_POOL = "pool"
 
 NIC_VALID_MODES = frozenset([NIC_MODE_BRIDGED, NIC_MODE_ROUTED])
 
+# An extra description of the network.
+# Can be used by hooks/kvm-vif-bridge to apply different rules
+NETWORK_TYPE_PRIVATE = "private"
+NETWORK_TYPE_PUBLIC = "public"
+
+NETWORK_VALID_TYPES = frozenset([NETWORK_TYPE_PRIVATE, NETWORK_TYPE_PUBLIC])
+
 NICS_PARAMETER_TYPES = {
   NIC_MODE: VTYPE_STRING,
   NIC_LINK: VTYPE_STRING,
@@ -1105,12 +1148,14 @@ IDISK_MODE = "mode"
 IDISK_ADOPT = "adopt"
 IDISK_VG = "vg"
 IDISK_METAVG = "metavg"
+IDISK_PROVIDER = "provider"
 IDISK_PARAMS_TYPES = {
   IDISK_SIZE: VTYPE_SIZE,
   IDISK_MODE: VTYPE_STRING,
   IDISK_ADOPT: VTYPE_STRING,
   IDISK_VG: VTYPE_STRING,
   IDISK_METAVG: VTYPE_STRING,
+  IDISK_PROVIDER: VTYPE_STRING,
   }
 IDISK_PARAMS = frozenset(IDISK_PARAMS_TYPES.keys())
 
@@ -1119,11 +1164,13 @@ INIC_MAC = "mac"
 INIC_IP = "ip"
 INIC_MODE = "mode"
 INIC_LINK = "link"
+INIC_NETWORK = "network"
 INIC_PARAMS_TYPES = {
   INIC_IP: VTYPE_MAYBE_STRING,
   INIC_LINK: VTYPE_STRING,
   INIC_MAC: VTYPE_STRING,
   INIC_MODE: VTYPE_STRING,
+  INIC_NETWORK: VTYPE_MAYBE_STRING,
   }
 INIC_PARAMS = frozenset(INIC_PARAMS_TYPES.keys())
 
@@ -1624,6 +1671,8 @@ QR_GROUP = "group"
 QR_OS = "os"
 QR_JOB = "job"
 QR_EXPORT = "export"
+QR_NETWORK = "network"
+QR_EXTSTORAGE = "extstorage"
 
 #: List of resources which can be queried using L{opcodes.OpQuery}
 QR_VIA_OP = frozenset([
@@ -1633,6 +1682,8 @@ QR_VIA_OP = frozenset([
   QR_GROUP,
   QR_OS,
   QR_EXPORT,
+  QR_NETWORK,
+  QR_EXTSTORAGE,
   ])
 
 #: List of resources which can be queried using Local UniX Interface
@@ -1724,6 +1775,7 @@ SS_HYPERVISOR_LIST = "hypervisor_list"
 SS_MAINTAIN_NODE_HEALTH = "maintain_node_health"
 SS_UID_POOL = "uid_pool"
 SS_NODEGROUPS = "nodegroups"
+SS_NETWORKS = "networks"
 
 SS_FILE_PERMS = 0444
 
@@ -1871,6 +1923,8 @@ DISK_LD_DEFAULTS = {
   LD_RBD: {
     LDP_POOL: "rbd"
     },
+  LD_EXT: {
+    },
   }
 
 # readability shortcuts
@@ -1908,6 +1962,8 @@ DISK_DT_DEFAULTS = {
   DT_RBD: {
     RBD_POOL: DISK_LD_DEFAULTS[LD_RBD][LDP_POOL]
     },
+  DT_EXT: {
+    },
   }
 
 # we don't want to export the shortcuts
@@ -2067,6 +2123,7 @@ VALID_ALLOC_POLICIES = [
 
 # Temporary external/shared storage parameters
 BLOCKDEV_DRIVER_MANUAL = "manual"
+EXTSTORAGE_SAMPLE_PROVIDER = "rbd"
 
 # qemu-img path, required for ovfconverter
 QEMUIMG_PATH = _autoconf.QEMUIMG_PATH
index ca3e4d8..118fb87 100644 (file)
@@ -139,6 +139,12 @@ class ConfigVersionMismatch(ConfigurationError):
   pass
 
 
+class AddressPoolError(GenericError):
+  """Errors related to IP address pools.
+
+  """
+
+
 class ReservationError(GenericError):
   """Errors reserving a resource.
 
index adcc7bc..e047fd1 100644 (file)
@@ -37,6 +37,7 @@ import shutil
 import socket
 import stat
 import StringIO
+# Used to pass file descriptors to a running KVM process (NIC hotplug)
+import fdsend
 try:
   import affinity   # pylint: disable=F0401
 except ImportError:
@@ -773,6 +774,34 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     if nic.nicparams[constants.NIC_LINK]:
       env["LINK"] = nic.nicparams[constants.NIC_LINK]
 
+    def _BuildNetworkEnv(name, network, gateway, network6, gateway6,
+                         network_type, mac_prefix, tags, env):
+      if name:
+        env["NETWORK_NAME"] = name
+      if network:
+        env["NETWORK_SUBNET"] = network
+      if gateway:
+        env["NETWORK_GATEWAY"] = gateway
+      if network6:
+        env["NETWORK_SUBNET6"] = network6
+      if gateway6:
+        env["NETWORK_GATEWAY6"] = gateway6
+      if mac_prefix:
+        env["NETWORK_MAC_PREFIX"] = mac_prefix
+      if network_type:
+        env["NETWORK_TYPE"] = network_type
+      if tags:
+        env["NETWORK_TAGS"] = " ".join(tags)
+
+      return env
+
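+    # nic.netinfo is expected to carry the serialized objects.Network the
+    # NIC is connected to, filled in before the hypervisor call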
+    if nic.network:
+      n = objects.Network.FromDict(nic.netinfo)
+      _BuildNetworkEnv(nic.network, n.network, n.gateway,
+                       n.network6, n.gateway6, n.network_type,
+                       n.mac_prefix, n.tags, env)
+
     if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
       env["BRIDGE"] = nic.nicparams[constants.NIC_LINK]
 
@@ -942,6 +971,69 @@ class KVMHypervisor(hv_base.BaseHypervisor):
         data.append(info)
     return data
 
+  def _GenerateKVMBlockDevicesOptions(self, instance, kvm_cmd, block_devices):
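+    """Extend kvm_cmd with -drive/-device options for the instance's disks.
+
+    """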
+
+    hvp = instance.hvparams
+    boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
+
+    _, v_major, v_min, _ = self._GetKVMVersion()
+
+    # whether this is an older KVM version that uses the boot=on flag
+    # on devices
+    needs_boot_flag = (v_major, v_min) < (0, 14)
+
+    disk_type = hvp[constants.HV_DISK_TYPE]
+    if disk_type == constants.HT_DISK_PARAVIRTUAL:
+      if_val = ",if=virtio"
+      if (v_major, v_min) >= (0, 12):
+        disk_model = "virtio-blk-pci"
+      else:
+        disk_model = "virtio"
+    else:
+      if_val = ",if=%s" % disk_type
+      disk_model = disk_type
+    # Cache mode
+    disk_cache = hvp[constants.HV_DISK_CACHE]
+    if instance.disk_template in constants.DTS_EXT_MIRROR:
+      if disk_cache != "none":
+        # TODO: make this a hard error, instead of a silent overwrite
+        logging.warning("KVM: overriding disk_cache setting '%s' with 'none'"
+                        " to prevent shared storage corruption on migration",
+                        disk_cache)
+      cache_val = ",cache=none"
+    elif disk_cache != constants.HT_CACHE_DEFAULT:
+      cache_val = ",cache=%s" % disk_cache
+    else:
+      cache_val = ""
+    for cfdev, dev_path in block_devices:
+      if cfdev.mode != constants.DISK_RDWR:
+        raise errors.HypervisorError("Instance has read-only disks which"
+                                     " are not supported by KVM")
+      # TODO: handle FD_LOOP and FD_BLKTAP (?)
+      boot_val = ""
+      if boot_disk:
+        kvm_cmd.extend(["-boot", "c"])
+        boot_disk = False
+        if needs_boot_flag and disk_type != constants.HT_DISK_IDE:
+          boot_val = ",boot=on"
+      drive_val = "file=%s,format=raw%s%s" % \
+                  (dev_path, boot_val, cache_val)
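+      # A hotpluggable disk gets if=none on its -drive plus an explicit
+      # virtio-blk-pci -device pinned to the PCI slot, so that device_del
+      # can detach it later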
+      if cfdev.pci:
+        # TODO: name the id after the device model
+        drive_val += (",bus=0,unit=%d,if=none,id=drive%d" %
+                      (cfdev.pci, cfdev.idx))
+      else:
+        drive_val += if_val
+
+      kvm_cmd.extend(["-drive", drive_val])
+
+      if cfdev.pci:
+        dev_val = ("%s,bus=pci.0,addr=%s,drive=drive%d,id=virtio-blk-pci.%d" %
+                   (disk_model, hex(cfdev.pci), cfdev.idx, cfdev.idx))
+        kvm_cmd.extend(["-device", dev_val])
+
+    return kvm_cmd
+
   def _GenerateKVMRuntime(self, instance, block_devices, startup_paused):
     """Generate KVM information to start an instance.
 
@@ -1000,38 +1092,9 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     needs_boot_flag = (v_major, v_min) < (0, 14)
 
     disk_type = hvp[constants.HV_DISK_TYPE]
-    if disk_type == constants.HT_DISK_PARAVIRTUAL:
-      if_val = ",if=virtio"
-    else:
-      if_val = ",if=%s" % disk_type
-    # Cache mode
-    disk_cache = hvp[constants.HV_DISK_CACHE]
-    if instance.disk_template in constants.DTS_EXT_MIRROR:
-      if disk_cache != "none":
-        # TODO: make this a hard error, instead of a silent overwrite
-        logging.warning("KVM: overriding disk_cache setting '%s' with 'none'"
-                        " to prevent shared storage corruption on migration",
-                        disk_cache)
-      cache_val = ",cache=none"
-    elif disk_cache != constants.HT_CACHE_DEFAULT:
-      cache_val = ",cache=%s" % disk_cache
-    else:
-      cache_val = ""
-    for cfdev, dev_path in block_devices:
-      if cfdev.mode != constants.DISK_RDWR:
-        raise errors.HypervisorError("Instance has read-only disks which"
-                                     " are not supported by KVM")
-      # TODO: handle FD_LOOP and FD_BLKTAP (?)
-      boot_val = ""
-      if boot_disk:
-        kvm_cmd.extend(["-boot", "c"])
-        boot_disk = False
-        if needs_boot_flag and disk_type != constants.HT_DISK_IDE:
-          boot_val = ",boot=on"
-
-      drive_val = "file=%s,format=raw%s%s%s" % (dev_path, if_val, boot_val,
-                                                cache_val)
-      kvm_cmd.extend(["-drive", drive_val])
+    if not instance.hotplug_info:
+      kvm_cmd = self._GenerateKVMBlockDevicesOptions(instance, kvm_cmd,
+                                                     block_devices)
 
     #Now we can specify a different device type for CDROM devices.
     cdrom_disk_type = hvp[constants.HV_KVM_CDROM_DISK_TYPE]
@@ -1257,7 +1320,10 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     kvm_nics = instance.nics
     hvparams = hvp
 
-    return (kvm_cmd, kvm_nics, hvparams)
+    if instance.hotplug_info:
+      return (kvm_cmd, kvm_nics, hvparams, block_devices)
+    else:
+      return (kvm_cmd, kvm_nics, hvparams)
 
   def _WriteKVMRuntime(self, instance_name, data):
     """Write an instance's KVM runtime
@@ -1283,9 +1349,21 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     """Save an instance's KVM runtime
 
     """
-    kvm_cmd, kvm_nics, hvparams = kvm_runtime
+    if instance.hotplug_info:
+      kvm_cmd, kvm_nics, hvparams, block_devices = kvm_runtime
+      serialized_blockdevs = [(blk.ToDict(), link)
+                              for blk, link in block_devices]
+    else:
+      kvm_cmd, kvm_nics, hvparams = kvm_runtime
+
     serialized_nics = [nic.ToDict() for nic in kvm_nics]
-    serialized_form = serializer.Dump((kvm_cmd, serialized_nics, hvparams))
+
+    if instance.hotplug_info:
+      serialized_form = serializer.Dump((kvm_cmd, serialized_nics,
+                                         hvparams, serialized_blockdevs))
+    else:
+      serialized_form = serializer.Dump((kvm_cmd, serialized_nics, hvparams))
+
     self._WriteKVMRuntime(instance.name, serialized_form)
 
   def _LoadKVMRuntime(self, instance, serialized_runtime=None):
@@ -1295,9 +1373,19 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     if not serialized_runtime:
       serialized_runtime = self._ReadKVMRuntime(instance.name)
     loaded_runtime = serializer.Load(serialized_runtime)
-    kvm_cmd, serialized_nics, hvparams = loaded_runtime
+    if instance.hotplug_info:
+      kvm_cmd, serialized_nics, hvparams, serialized_blockdevs = loaded_runtime
+      block_devices = [(objects.Disk.FromDict(sdisk), link)
+                       for sdisk, link in serialized_blockdevs]
+    else:
+      kvm_cmd, serialized_nics, hvparams = loaded_runtime
+
     kvm_nics = [objects.NIC.FromDict(snic) for snic in serialized_nics]
-    return (kvm_cmd, kvm_nics, hvparams)
+
+    if instance.hotplug_info:
+      return (kvm_cmd, kvm_nics, hvparams, block_devices)
+    else:
+      return (kvm_cmd, kvm_nics, hvparams)
 
   def _RunKVMCmd(self, name, kvm_cmd, tap_fds=None):
     """Run the KVM cmd and check for errors
@@ -1340,10 +1428,15 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     conf_hvp = instance.hvparams
     name = instance.name
     self._CheckDown(name)
+    boot_disk = conf_hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
 
     temp_files = []
 
-    kvm_cmd, kvm_nics, up_hvp = kvm_runtime
+    if instance.hotplug_info:
+      kvm_cmd, kvm_nics, up_hvp, block_devices = kvm_runtime
+    else:
+      kvm_cmd, kvm_nics, up_hvp = kvm_runtime
+
     up_hvp = objects.FillDict(conf_hvp, up_hvp)
 
     _, v_major, v_min, _ = self._GetKVMVersion()
@@ -1364,6 +1457,10 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       utils.WriteFile(keymap_path, data="include en-us\ninclude %s\n" % keymap)
       kvm_cmd.extend(["-k", keymap_path])
 
+    if instance.hotplug_info:
+      kvm_cmd = self._GenerateKVMBlockDevicesOptions(instance, kvm_cmd,
+                                                     block_devices)
+
     # We have reasons to believe changing something like the nic driver/type
     # upon migration won't exactly fly with the instance kernel, so for nic
     # related parameters we'll use up_hvp
@@ -1398,8 +1495,16 @@ class KVMHypervisor(hv_base.BaseHypervisor):
         tapfds.append(tapfd)
         taps.append(tapname)
         if (v_major, v_min) >= (0, 12):
-          nic_val = "%s,mac=%s,netdev=netdev%s" % (nic_model, nic.mac, nic_seq)
-          tap_val = "type=tap,id=netdev%s,fd=%d%s" % (nic_seq, tapfd, tap_extra)
+          if nic.pci:
+            nic_idx = nic.idx
+          else:
+            nic_idx = nic_seq
+          nic_val = ("%s,mac=%s,netdev=netdev%d" %
+                     (nic_model, nic.mac, nic_idx))
+          if nic.pci:
+            nic_val += (",bus=pci.0,addr=%s,id=virtio-net-pci.%d" %
+                        (hex(nic.pci), nic_idx))
+          tap_val = "type=tap,id=netdev%d,fd=%d%s" % (nic_idx, tapfd, tap_extra)
           kvm_cmd.extend(["-netdev", tap_val, "-device", nic_val])
         else:
           nic_val = "nic,vlan=%s,macaddr=%s,model=%s" % (nic_seq,
@@ -1550,6 +1655,167 @@ class KVMHypervisor(hv_base.BaseHypervisor):
 
     return result
 
+  def HotAddDisk(self, instance, disk, dev_path, seq):
+    """Hotadd new disk to the VM
+
+    """
+    if not self._InstancePidAlive(instance.name)[2]:
+      logging.info("Cannot hotplug. Instance %s not alive" % instance.name)
+      return disk.ToDict()
+
+    _, v_major, v_min, _ = self._GetKVMVersion()
+    if (v_major, v_min) >= (1, 0) and disk.pci:
+      idx = disk.idx
+      command = ("drive_add dummy file=%s,if=none,id=drive%d,format=raw" %
+                 (dev_path, idx))
+
+      logging.info("%s" % command)
+      output = self._CallMonitorCommand(instance.name, command)
+
+      command = ("device_add virtio-blk-pci,bus=pci.0,addr=%s,"
+                 "drive=drive%d,id=virtio-blk-pci.%d"
+                 % (hex(disk.pci), idx, idx))
+      logging.info("%s" % command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s" % line)
+
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      block_devices.append((disk, dev_path))
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+    return disk.ToDict()
+
+  def HotDelDisk(self, instance, disk, seq):
+    """Hotdel disk to the VM
+
+    """
+    if not self._InstancePidAlive(instance.name)[2]:
+      logging.info("Cannot hotplug. Instance %s not alive" % instance.name)
+      return disk.ToDict()
+
+    _, v_major, v_min, _ = self._GetKVMVersion()
+    if (v_major, v_min) >= (1, 0) and disk.pci:
+      idx = disk.idx
+
+      command = "device_del virtio-blk-pci.%d" % idx
+      logging.info("%s" % command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s" % line)
+
+      command = "drive_del drive%d" % idx
+      logging.info("%s" % command)
+      #output = self._CallMonitorCommand(instance.name, command)
+      #for line in output.stdout.splitlines():
+      #  logging.info("%s" % line)
+
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      rem  = [(d, p) for d, p in block_devices
+                     if d.idx is not None and d.idx == idx]
+      try:
+        block_devices.remove(rem[0])
+      except (ValueError, IndexError):
+        logging.info("Disk with %d idx disappeared from runtime file", idx)
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+    return disk.ToDict()
+
+  def HotAddNic(self, instance, nic, seq):
+    """Hotadd new nic to the VM
+
+    """
+    if not self._InstancePidAlive(instance.name)[2]:
+      logging.info("Cannot hotplug. Instance %s not alive" % instance.name)
+      return nic.ToDict()
+
+    _, v_major, v_min, _ = self._GetKVMVersion()
+    if (v_major, v_min) >= (1, 0) and nic.pci:
+      mac = nic.mac
+      idx = nic.idx
+
+      (tap, fd) = _OpenTap()
+      logging.info("%s %d", tap, fd)
+
+      self._PassTapFd(instance, fd, nic)
+
+      command = ("netdev_add tap,id=netdev%d,fd=netdev%d"
+                 % (idx, idx))
+      logging.info("%s" % command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s" % line)
+
+      command = ("device_add virtio-net-pci,bus=pci.0,addr=%s,mac=%s,"
+                 "netdev=netdev%d,id=virtio-net-pci.%d"
+                 % (hex(nic.pci), mac, idx, idx))
+      logging.info("%s" % command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s" % line)
+
+      self._ConfigureNIC(instance, seq, nic, tap)
+
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      kvm_nics.append(nic)
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+    return nic.ToDict()
+
+  def HotDelNic(self, instance, nic, seq):
+    """Hotadd new nic to the VM
+
+    """
+    if not self._InstancePidAlive(instance.name)[2]:
+      logging.info("Cannot hotplug. Instance %s not alive" % instance.name)
+      return nic.ToDict()
+
+    _, v_major, v_min, _ = self._GetKVMVersion()
+    if (v_major, v_min) >= (1, 0) and nic.pci:
+      mac = nic.mac
+      idx = nic.idx
+
+      command = "device_del virtio-net-pci.%d" % idx
+      logging.info("%s" % command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s" % line)
+
+      command = "netdev_del netdev%d" % idx
+      logging.info("%s" % command)
+      output = self._CallMonitorCommand(instance.name, command)
+      for line in output.stdout.splitlines():
+        logging.info("%s" % line)
+
+      (kvm_cmd, kvm_nics,
+       hvparams, block_devices) = self._LoadKVMRuntime(instance)
+      rem = [n for n in kvm_nics if n.idx is not None and n.idx == nic.idx]
+      try:
+        kvm_nics.remove(rem[0])
+      except (ValueError, IndexError):
+        logging.info("NIC with %d idx disappeared from runtime file", nic.idx)
+      new_kvm_runtime = (kvm_cmd, kvm_nics, hvparams, block_devices)
+      self._SaveKVMRuntime(instance, new_kvm_runtime)
+
+    return nic.ToDict()
+
+  def _PassTapFd(self, instance, fd, nic):
+    monsock = utils.ShellQuote(self._InstanceMonitor(instance.name))
+    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    s.connect(monsock)
+    idx = nic.idx
+    command = "getfd netdev%d\n" % idx
+    fds = [fd]
+    logging.info("Passing fds %s to the monitor", fds)
+    fdsend.sendfds(s, command, fds=fds)
+    s.close()
+
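Taken together, HotAddNic drives three monitor steps: hand the tap fd to QEMU under a name (getfd, via _PassTapFd), create the netdev backend (netdev_add), then plug the PCI device (device_add). A standalone sketch of the same dialogue, assuming a QEMU >= 1.0 human monitor socket and the fdsend module used above; the socket path and helper name are illustrative, not part of the patch:

    import socket

    import fdsend  # third-party module, same dependency as _PassTapFd

    MONSOCK = "/var/run/ganeti/kvm-hypervisor/ctrl/inst1.monitor"  # assumed

    def hotplug_nic_sketch(tapfd, idx, mac, pci_slot):
      # Pass the fd under the name "netdev<idx>" (the real code does this in
      # _PassTapFd and issues the commands through _CallMonitorCommand)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      sock.connect(MONSOCK)
      fdsend.sendfds(sock, "getfd netdev%d\n" % idx, fds=[tapfd])
      # Create the tap backend referencing the named fd ...
      sock.send("netdev_add tap,id=netdev%d,fd=netdev%d\n" % (idx, idx))
      # ... and plug the virtio NIC into the given PCI slot
      sock.send("device_add virtio-net-pci,bus=pci.0,addr=%s,mac=%s,"
                "netdev=netdev%d,id=virtio-net-pci.%d\n"
                % (hex(pci_slot), mac, idx, idx))
      sock.close()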
   @classmethod
   def _ParseKVMVersion(cls, text):
     """Parse the KVM version from the --help output.
index ea044d5..8691585 100644 (file)
@@ -1465,6 +1465,7 @@ LEVEL_INSTANCE = 1
 LEVEL_NODEGROUP = 2
 LEVEL_NODE = 3
 LEVEL_NODE_RES = 4
+LEVEL_NETWORK = 5
 
 LEVELS = [
   LEVEL_CLUSTER,
@@ -1472,6 +1473,7 @@ LEVELS = [
   LEVEL_NODEGROUP,
   LEVEL_NODE,
   LEVEL_NODE_RES,
+  LEVEL_NETWORK,
   ]
 
 # Lock levels which are modifiable
@@ -1480,6 +1482,7 @@ LEVELS_MOD = frozenset([
   LEVEL_NODE,
   LEVEL_NODEGROUP,
   LEVEL_INSTANCE,
+  LEVEL_NETWORK,
   ])
 
 #: Lock level names (make sure to use singular form)
@@ -1489,6 +1492,7 @@ LEVEL_NAMES = {
   LEVEL_NODEGROUP: "nodegroup",
   LEVEL_NODE: "node",
   LEVEL_NODE_RES: "node-res",
+  LEVEL_NETWORK: "network",
   }
 
 # Constant for the big ganeti lock
@@ -1506,7 +1510,7 @@ class GanetiLockManager:
   """
   _instance = None
 
-  def __init__(self, nodes, nodegroups, instances):
+  def __init__(self, nodes, nodegroups, instances, networks):
     """Constructs a new GanetiLockManager object.
 
     There should be only a GanetiLockManager object at any time, so this
@@ -1531,8 +1535,8 @@ class GanetiLockManager:
       LEVEL_NODE: LockSet(nodes, "node", monitor=self._monitor),
       LEVEL_NODE_RES: LockSet(nodes, "node-res", monitor=self._monitor),
       LEVEL_NODEGROUP: LockSet(nodegroups, "nodegroup", monitor=self._monitor),
-      LEVEL_INSTANCE: LockSet(instances, "instance",
-                              monitor=self._monitor),
+      LEVEL_INSTANCE: LockSet(instances, "instance", monitor=self._monitor),
+      LEVEL_NETWORK: LockSet(networks, "network", monitor=self._monitor),
       }
 
     assert compat.all(ls.name == LEVEL_NAMES[level]
index 108b836..55066f5 100644 (file)
@@ -60,6 +60,7 @@ REQ_QUERY_JOBS = "QueryJobs"
 REQ_QUERY_INSTANCES = "QueryInstances"
 REQ_QUERY_NODES = "QueryNodes"
 REQ_QUERY_GROUPS = "QueryGroups"
+REQ_QUERY_NETWORKS = "QueryNetworks"
 REQ_QUERY_EXPORTS = "QueryExports"
 REQ_QUERY_CONFIG_VALUES = "QueryConfigValues"
 REQ_QUERY_CLUSTER_INFO = "QueryClusterInfo"
@@ -560,6 +561,9 @@ class Client(object):
   def QueryGroups(self, names, fields, use_locking):
     return self.CallMethod(REQ_QUERY_GROUPS, (names, fields, use_locking))
 
+  def QueryNetworks(self, names, fields, use_locking):
+    return self.CallMethod(REQ_QUERY_NETWORKS, (names, fields, use_locking))
+
   def QueryExports(self, nodes, use_locking):
     return self.CallMethod(REQ_QUERY_EXPORTS, (nodes, use_locking))
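The new LUXI call composes like the existing queries; a hedged sketch of client-side use (field names follow lib/query.py):

    from ganeti import luxi

    cl = luxi.Client()  # assumes the default master socket
    fields = ["name", "network", "free_count"]
    for (name, subnet, free) in cl.QueryNetworks([], fields, False):
      print "%-15s %-18s %s" % (name, subnet, free)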
 
diff --git a/lib/network.py b/lib/network.py
new file mode 100644 (file)
index 0000000..c424223
--- /dev/null
@@ -0,0 +1,202 @@
+#
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Ip address pool management functions.
+
+"""
+
+import ipaddr
+
+from bitarray import bitarray
+from base64 import b64encode
+from base64 import b64decode
+
+from ganeti import errors
+
+
+class AddressPool(object):
+  """Address pool class, wrapping an objects.Network object
+
+  This class provides methods to manipulate address pools, backed by
+  L{objects.Network} objects.
+
+  """
+  def __init__(self, network):
+    """Initialize a new IPv4 address pool from an objects.Network object
+
+    @type network: L{objects.Network}
+    @param network: the network object from which the pool will be generated
+
+    """
+    self.network = None
+    self.gateway = None
+    self.network6 = None
+    self.gateway6 = None
+
+    self.net = network
+
+    self.network = ipaddr.IPNetwork(self.net.network)
+    if self.net.gateway:
+      self.gateway = ipaddr.IPAddress(self.net.gateway)
+
+    if self.net.network6:
+      self.network6 = ipaddr.IPv6Network(self.net.network6)
+    if self.net.gateway6:
+      self.gateway6 = ipaddr.IPv6Address(self.net.gateway6)
+
+    if self.net.reservations:
+      self.reservations = bitarray()
+      self.reservations.fromstring(b64decode(self.net.reservations))
+    else:
+      self.reservations = bitarray(self.network.numhosts)
+      self.reservations.setall(False)
+
+    if self.net.ext_reservations:
+      self.ext_reservations = bitarray()
+      self.ext_reservations.fromstring(b64decode(self.net.ext_reservations))
+    else:
+      self.ext_reservations = bitarray(self.network.numhosts)
+      self.ext_reservations.setall(False)
+
+    assert self.network.numhosts >= 8
+    assert len(self.reservations) == self.network.numhosts
+    assert len(self.ext_reservations) == self.network.numhosts
+
+  def _Contains(self, address):
+    if address is None:
+      return False
+    addr = ipaddr.IPAddress(address)
+
+    return addr in self.network
+
+  def _GetAddrIndex(self, address):
+    addr = ipaddr.IPAddress(address)
+
+    if addr not in self.network:
+      raise errors.AddressPoolError("%s does not contain %s" %
+                                    (self.network, addr))
+
+    return int(addr) - int(self.network.network)
+
+  def _Update(self):
+    """Write address pools back to the network object"""
+    self.net.ext_reservations = b64encode(self.ext_reservations.tostring())
+    self.net.reservations = b64encode(self.reservations.tostring())
+
+  def _Mark(self, address, value=True, external=False):
+    idx = self._GetAddrIndex(address)
+    if external:
+      self.ext_reservations[idx] = value
+    else:
+      self.reservations[idx] = value
+    self._Update()
+
+  def _GetSize(self):
+    return 2**(32 - self.network.prefixlen)
+
+  @property
+  def all_reservations(self):
+    """Return a combined map of internal + external reservations."""
+    return (self.reservations | self.ext_reservations)
+
+  def Validate(self):
+    assert self.net.family == 4
+    assert len(self.reservations) == self._GetSize()
+    assert len(self.ext_reservations) == self._GetSize()
+    assert not (self.reservations & self.ext_reservations).any()
+
+    if self.gateway is not None:
+      assert self.net.family == self.gateway.version
+      assert self.gateway in self.network
+
+    if self.network6 and self.gateway6:
+      assert self.gateway6 in self.network6
+
+    return True
+
+  def IsFull(self):
+    """Check whether the network is full"""
+    return self.all_reservations.all()
+
+  def GetReservedCount(self):
+    """Get the count of reserved addresses"""
+    return self.all_reservations.count(True)
+
+  def GetFreeCount(self):
+    """Get the count of unused addresses"""
+    return self.all_reservations.count(False)
+
+  def GetMap(self):
+    """Return a textual representation of the network's occupation status."""
+    return self.all_reservations.to01().replace("1", "X").replace("0", ".")
+
+  def IsReserved(self, address):
+    """Checks if the given IP is reserved"""
+    idx = self._GetAddrIndex(address)
+    return self.all_reservations[idx]
+
+  def Reserve(self, address, external=False):
+    """Mark an address as used."""
+    if self.IsReserved(address):
+      raise errors.AddressPoolError("%s is already reserved" % address)
+    self._Mark(address, external=external)
+
+  def Release(self, address, external=False):
+    """Release a given address reservation."""
+    self._Mark(address, value=False, external=external)
+
+  def GetFreeAddress(self):
+    """Returns the first available address."""
+    if self.IsFull():
+      raise errors.AddressPoolError("%s is full" % self.network)
+
+    idx = self.all_reservations.index(False)
+    address = str(self.network[idx])
+    self.Reserve(address)
+    return address
+
+  def GenerateFree(self):
+    """A generator for free addresses."""
+    def _iter_free():
+      for idx in self.all_reservations.search("0", 64):
+        yield str(self.network[idx])
+
+    return _iter_free().next
+
+  def GetExternalReservations(self):
+    """Returns a list of all externally reserved addresses"""
+    idxs = self.ext_reservations.search("1")
+    return [str(self.network[idx]) for idx in idxs]
+
+  @classmethod
+  def InitializeNetwork(cls, net):
+    """Initialize an L{objects.Network} object
+
+    Reserve the network, broadcast and gateway IPs
+
+    """
+    obj = cls(net)
+    obj._Update()
+    for ip in [obj.network[0], obj.network[-1]]:
+      obj.Reserve(ip, external=True)
+    if obj.net.gateway is not None:
+      obj.Reserve(obj.net.gateway, external=True)
+    obj.Validate()
+    return obj
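To make the lifecycle concrete, the pool can be exercised against a bare stand-in for objects.Network; a sketch (the stub carries only the slots AddressPool reads, and the commented values assume a /24):

    class _FakeNet(object):
      # Illustrative stand-in for objects.Network
      network = "192.0.2.0/24"
      gateway = "192.0.2.1"
      network6 = None
      gateway6 = None
      reservations = None
      ext_reservations = None
      family = 4

    pool = AddressPool.InitializeNetwork(_FakeNet())
    assert pool.GetReservedCount() == 3   # network, broadcast, gateway
    first = pool.GetFreeAddress()         # "192.0.2.2", now reserved
    pool.Release(first)                   # hand it back
    print pool.GetMap()                   # "X" = reserved, "." = free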
index d79e085..d9861ec 100644 (file)
@@ -50,7 +50,7 @@ from socket import AF_INET
 
 
 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
-           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
+           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
 
 _TIMESTAMPS = ["ctime", "mtime"]
 _UUID = ["uuid"]
@@ -439,6 +439,7 @@ class ConfigData(ConfigObject):
     "nodes",
     "nodegroups",
     "instances",
+    "networks",
     "serial_no",
     ] + _TIMESTAMPS
 
@@ -451,7 +452,7 @@ class ConfigData(ConfigObject):
     """
     mydict = super(ConfigData, self).ToDict()
     mydict["cluster"] = mydict["cluster"].ToDict()
-    for key in "nodes", "instances", "nodegroups":
+    for key in "nodes", "instances", "nodegroups", "networks":
       mydict[key] = self._ContainerToDicts(mydict[key])
 
     return mydict
@@ -466,6 +467,7 @@ class ConfigData(ConfigObject):
     obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
     obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
     obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
+    obj.networks = cls._ContainerFromDicts(obj.networks, dict, Network)
     return obj
 
   def HasAnyDiskOfType(self, dev_type):
@@ -502,11 +504,16 @@ class ConfigData(ConfigObject):
       # gives a good approximation.
       if self.HasAnyDiskOfType(constants.LD_DRBD8):
         self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
+    if self.networks is None:
+      self.networks = {}
+
+
+class HotplugInfo(ConfigObject):
+  __slots__ = ["nics", "disks", "pci_pool"]
 
 
 class NIC(ConfigObject):
   """Config object representing a network card."""
-  __slots__ = ["mac", "ip", "nicparams"]
+  __slots__ = ["idx", "pci", "mac", "ip", "network", "nicparams", "netinfo"]
 
   @classmethod
   def CheckParameterSyntax(cls, nicparams):
@@ -530,7 +537,7 @@ class NIC(ConfigObject):
 
 class Disk(ConfigObject):
   """Config object representing a block device."""
-  __slots__ = ["dev_type", "logical_id", "physical_id",
+  __slots__ = ["idx", "pci", "dev_type", "logical_id", "physical_id",
                "children", "iv_name", "size", "mode", "params"]
 
   def CreateOnSecondary(self):
@@ -605,7 +612,8 @@ class Disk(ConfigObject):
 
     """
     if self.dev_type in [constants.LD_LV, constants.LD_FILE,
-                         constants.LD_BLOCKDEV, constants.LD_RBD]:
+                         constants.LD_BLOCKDEV, constants.LD_RBD,
+                         constants.LD_EXT]:
       result = [node]
     elif self.dev_type in constants.LDS_DRBD:
       result = [self.logical_id[0], self.logical_id[1]]
@@ -681,7 +689,7 @@ class Disk(ConfigObject):
 
     """
     if self.dev_type in (constants.LD_LV, constants.LD_FILE,
-                         constants.LD_RBD):
+                         constants.LD_RBD, constants.LD_EXT):
       self.size += amount
     elif self.dev_type == constants.LD_DRBD8:
       if self.children:
@@ -934,6 +942,9 @@ class Disk(ConfigObject):
                  params)
       result.append(params)
 
+    elif disk_template == constants.DT_EXT:
+      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
+
     return result
 
 
@@ -1033,6 +1044,7 @@ class Instance(TaggableObject):
     "admin_state",
     "nics",
     "disks",
+    "hotplug_info",
     "disk_template",
     "network_port",
     "serial_no",
@@ -1163,6 +1175,8 @@ class Instance(TaggableObject):
       else:
         nlist = []
       bo[attr] = nlist
+    if self.hotplug_info:
+      bo["hotplug_info"] = self.hotplug_info.ToDict()
     return bo
 
   @classmethod
@@ -1180,6 +1194,8 @@ class Instance(TaggableObject):
     obj = super(Instance, cls).FromDict(val)
     obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
     obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
+    if "hotplug_info" in val:
+      obj.hotplug_info = HotplugInfo.FromDict(val["hotplug_info"])
     return obj
 
   def UpgradeConfig(self):
@@ -1261,6 +1277,23 @@ class OS(ConfigObject):
     return cls.SplitNameVariant(name)[1]
 
 
+class ExtStorage(ConfigObject):
+  """Config object representing an External Storage Provider.
+
+  """
+  __slots__ = [
+    "name",
+    "path",
+    "create_script",
+    "remove_script",
+    "grow_script",
+    "attach_script",
+    "detach_script",
+    "verify_script",
+    "supported_parameters",
+    ]
+
+
 class NodeHvState(ConfigObject):
   """Hypvervisor state on a node.
 
@@ -1389,6 +1422,7 @@ class NodeGroup(TaggableObject):
     "hv_state_static",
     "disk_state_static",
     "alloc_policy",
+    "networks",
     ] + _TIMESTAMPS + _UUID
 
   def ToDict(self):
@@ -1436,6 +1470,9 @@ class NodeGroup(TaggableObject):
     if self.ipolicy is None:
       self.ipolicy = MakeEmptyIPolicy()
 
+    if self.networks is None:
+      self.networks = {}
+
   def FillND(self, node):
     """Return filled out ndparams for L{objects.Node}
 
@@ -2020,6 +2057,26 @@ class InstanceConsole(ConfigObject):
     return True
 
 
+class Network(TaggableObject):
+  """Object representing a network definition for ganeti.
+
+  """
+  __slots__ = [
+    "name",
+    "serial_no",
+    "network_type",
+    "mac_prefix",
+    "family",
+    "network",
+    "network6",
+    "gateway",
+    "gateway6",
+    "size",
+    "reservations",
+    "ext_reservations",
+    ] + _TIMESTAMPS + _UUID
+
+
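Like any ConfigObject, a Network is built from keyword slots and round-trips through dicts; a brief sketch with illustrative values:

    net = Network(name="net1", network="10.0.0.0/24", gateway="10.0.0.1",
                  family=4, mac_prefix="aa:00:00")
    data = net.ToDict()                    # plain dict for the config file
    assert Network.FromDict(data).name == "net1"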
 class SerializableConfigParser(ConfigParser.SafeConfigParser):
   """Simple wrapper over ConfigParse that allows serialization.
 
index 3243e0e..641179e 100644 (file)
@@ -35,6 +35,7 @@ opcodes.
 
 import logging
 import re
+import ipaddr
 
 from ganeti import constants
 from ganeti import errors
@@ -162,6 +163,9 @@ _PIgnoreIpolicy = ("ignore_ipolicy", False, ht.TBool,
 _PAllowRuntimeChgs = ("allow_runtime_changes", True, ht.TBool,
                       "Allow runtime changes (eg. memory ballooning)")
 
+#: a required network name
+_PNetworkName = ("network_name", ht.NoDefault, ht.TNonEmptyString,
+                 "Set network name")
 
 #: OP_ID conversion regular expression
 _OPID_RE = re.compile("([a-z])([A-Z])")
@@ -196,6 +200,12 @@ _TDiskParams = \
   ht.Comment("Disk parameters")(ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
                                            ht.TOr(ht.TNonEmptyString, ht.TInt)))
 
+#: Same as _TDiskParams but with ht.TNonEmptyString in place of IDISK_PARAMS
+_TExtDiskParams = \
+  ht.Comment("ExtStorage Disk parameters")(ht.TDictOf(ht.TNonEmptyString,
+                                                      ht.TOr(ht.TNonEmptyString,
+                                                             ht.TInt)))
+
 _TQueryRow = \
   ht.TListOf(ht.TAnd(ht.TIsLength(2),
                      ht.TItems([ht.TElemOf(constants.RS_ALL),
@@ -337,6 +347,51 @@ def _CheckStorageType(storage_type):
 _PStorageType = ("storage_type", ht.NoDefault, _CheckStorageType,
                  "Storage type")
 
+_CheckNetworkType = ht.TElemOf(constants.NETWORK_VALID_TYPES)
+
+#: Network type parameter
+_PNetworkType = ("network_type", None, ht.TOr(ht.TNone, _CheckNetworkType),
+                 "Network type")
+
+def _CheckCIDRNetNotation(value):
+  """Ensure a given CIDR network notation is valid.
+
+  """
+  try:
+    ipaddr.IPv4Network(value)
+  except (ipaddr.AddressValueError, ipaddr.NetmaskValueError):
+    return False
+  return True
+
+
+def _CheckCIDRAddrNotation(value):
+  """Ensure a given IPv4 address is valid.
+
+  """
+  try:
+    ipaddr.IPv4Address(value)
+  except ipaddr.AddressValueError:
+    return False
+  return True
+
+
+def _CheckCIDR6AddrNotation(value):
+  """Ensure a given IPv6 address is valid.
+
+  """
+  try:
+    ipaddr.IPv6Address(value)
+  except ipaddr.AddressValueError:
+    return False
+  return True
+
+
+def _CheckCIDR6NetNotation(value):
+  """Ensure a given CIDR IPv6 network notation is valid.
+
+  """
+  try:
+    ipaddr.IPv6Network(value)
+  except (ipaddr.AddressValueError, ipaddr.NetmaskValueError):
+    return False
+  return True
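These helpers are plain predicates over strings, so they can be smoke-tested directly; a quick sketch:

    assert _CheckCIDRNetNotation("192.0.2.0/24")
    assert _CheckCIDRAddrNotation("192.0.2.10")
    assert not _CheckCIDRAddrNotation("not-an-ip")
    assert _CheckCIDR6NetNotation("2001:db8::/32")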
 
 class _AutoOpParamSlots(type):
   """Meta class for opcode definitions.
@@ -1195,6 +1250,7 @@ class OpInstanceCreate(OpCode):
     ("identify_defaults", False, ht.TBool,
      "Reset instance parameters to default if equal"),
     ("ip_check", True, ht.TBool, _PIpCheckDoc),
+    ("conflicts_check", True, ht.TBool, "Check for conflicting IPs"),
     ("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES),
      "Instance creation mode"),
     ("nics", ht.NoDefault, ht.TListOf(_TestNicDef),
@@ -1222,9 +1278,60 @@ class OpInstanceCreate(OpCode):
     ("src_path", None, ht.TMaybeString, "Source directory for import"),
     ("start", True, ht.TBool, "Whether to start instance after creation"),
     ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance tags"),
+    ("hotplug", None, ht.TMaybeBool, "Whether to hotplug devices"),
     ]
   OP_RESULT = ht.Comment("instance nodes")(ht.TListOf(ht.TNonEmptyString))
 
+  def Validate(self, set_defaults):
+    """Validate opcode parameters, optionally setting default values.
+
+    @type set_defaults: bool
+    @param set_defaults: Whether to set default values
+    @raise errors.OpPrereqError: When a parameter value doesn't match
+                                 requirements
+
+    """
+    # Check if the template is DT_EXT
+    is_ext = False
+    for (attr_name, _, _, _) in self.GetAllParams():
+      if hasattr(self, attr_name):
+        if attr_name == "disk_template" and \
+           getattr(self, attr_name) == constants.DT_EXT:
+          is_ext = True
+
+    for (attr_name, default, test, _) in self.GetAllParams():
+      assert test == ht.NoType or callable(test)
+
+      if not hasattr(self, attr_name):
+        if default == ht.NoDefault:
+          raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
+                                     (self.OP_ID, attr_name),
+                                     errors.ECODE_INVAL)
+        elif set_defaults:
+          if callable(default):
+            dval = default()
+          else:
+            dval = default
+          setattr(self, attr_name, dval)
+
+      # If the template is DT_EXT and attr_name is "disks", use a test
+      # method that allows passing unknown (arbitrary) parameters
+      if is_ext and attr_name == "disks":
+        test = ht.TListOf(_TExtDiskParams)
+
+      if test == ht.NoType:
+        # no tests here
+        continue
+
+      if set_defaults or hasattr(self, attr_name):
+        attr_val = getattr(self, attr_name)
+        if not test(attr_val):
+          logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
+                        self.OP_ID, attr_name, type(attr_val), attr_val)
+          raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
+                                     (self.OP_ID, attr_name),
+                                     errors.ECODE_INVAL)
+
 
 class OpInstanceReinstall(OpCode):
   """Reinstall an instance's OS."""
@@ -1492,6 +1599,7 @@ class OpInstanceSetParams(OpCode):
   """
   TestNicModifications = _TestInstSetParamsModList(_TestNicDef)
   TestDiskModifications = _TestInstSetParamsModList(_TDiskParams)
+  TestExtDiskModifications = _TestInstSetParamsModList(_TExtDiskParams)
 
   OP_DSC_FIELD = "instance_name"
   OP_PARAMS = [
@@ -1525,9 +1633,62 @@ class OpInstanceSetParams(OpCode):
     ("wait_for_sync", True, ht.TBool,
      "Whether to wait for the disk to synchronize, when changing template"),
     ("offline", None, ht.TMaybeBool, "Whether to mark instance as offline"),
+    ("conflicts_check", True, ht.TBool, "Check for conflicting IPs"),
+    ("hotplug", None, ht.TMaybeBool, "Whether to hotplug devices"),
+    ("allow_arbit_params", None, ht.TMaybeBool,
+     "Whether to allow the passing of arbitrary parameters to --disk(s)"),
     ]
   OP_RESULT = _TSetParamsResult
 
+  def Validate(self, set_defaults):
+    """Validate opcode parameters, optionally setting default values.
+
+    @type set_defaults: bool
+    @param set_defaults: Whether to set default values
+    @raise errors.OpPrereqError: When a parameter value doesn't match
+                                 requirements
+
+    """
+    # Check whether arbitrary (ExtStorage) disk parameters are allowed
+    allow_arbitrary_params = False
+    for (attr_name, _, _, _) in self.GetAllParams():
+      if hasattr(self, attr_name):
+        if attr_name == "allow_arbit_params" and \
+           getattr(self, attr_name) is True:
+          allow_arbitrary_params = True
+
+    for (attr_name, default, test, _) in self.GetAllParams():
+      assert test == ht.NoType or callable(test)
+
+      if not hasattr(self, attr_name):
+        if default == ht.NoDefault:
+          raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
+                                     (self.OP_ID, attr_name),
+                                     errors.ECODE_INVAL)
+        elif set_defaults:
+          if callable(default):
+            dval = default()
+          else:
+            dval = default
+          setattr(self, attr_name, dval)
+
+      # If `allow_arbit_params' is set, use the ExtStorage test
+      # method for disks
+      if allow_arbitrary_params and attr_name == "disks":
+        test = OpInstanceSetParams.TestExtDiskModifications
+
+      if test == ht.NoType:
+        # no tests here
+        continue
+
+      if set_defaults or hasattr(self, attr_name):
+        attr_val = getattr(self, attr_name)
+        if not test(attr_val):
+          logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
+                        self.OP_ID, attr_name, type(attr_val), attr_val)
+          raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
+                                     (self.OP_ID, attr_name),
+                                     errors.ECODE_INVAL)
+
 
 class OpInstanceGrowDisk(OpCode):
   """Grow a disk of an instance."""
@@ -1654,6 +1815,16 @@ class OpOsDiagnose(OpCode):
   OP_RESULT = _TOldQueryResult
 
 
+# ExtStorage opcodes
+class OpExtStorageDiagnose(OpCode):
+  """Compute the list of external storage providers."""
+  OP_PARAMS = [
+    _POutputFields,
+    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
+     "Which ExtStorage Provider to diagnose"),
+    ]
+
+
 # Exports opcodes
 class OpBackupQuery(OpCode):
   """Compute the list of exported images."""
@@ -1882,6 +2053,89 @@ class OpTestDummy(OpCode):
   WITH_LU = False
 
 
+# Network opcodes
+# Add a new network to the cluster
+class OpNetworkAdd(OpCode):
+  """Add an IP network to the cluster."""
+  OP_DSC_FIELD = "network_name"
+  OP_PARAMS = [
+    _PNetworkName,
+    _PNetworkType,
+    ("network", None, ht.TAnd(ht.TString ,_CheckCIDRNetNotation), None),
+    ("gateway", None, ht.TOr(ht.TNone, _CheckCIDRAddrNotation), None),
+    ("network6", None, ht.TOr(ht.TNone, _CheckCIDR6NetNotation), None),
+    ("gateway6", None, ht.TOr(ht.TNone, _CheckCIDR6AddrNotation), None),
+    ("mac_prefix", None, ht.TMaybeString, None),
+    ("add_reserved_ips", None,
+     ht.TOr(ht.TNone, ht.TListOf(_CheckCIDRAddrNotation)), None),
+    ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Network tags"),
+    ]
+
+
+class OpNetworkRemove(OpCode):
+  """Remove an existing network from the cluster.
+     Must not be connected to any nodegroup.
+
+  """
+  OP_DSC_FIELD = "network_name"
+  OP_PARAMS = [
+    _PNetworkName,
+    _PForce,
+    ]
+
+
+class OpNetworkSetParams(OpCode):
+  """Modify Network's parameters except for IPv4 subnet"""
+  OP_DSC_FIELD = "network_name"
+  OP_PARAMS = [
+    _PNetworkName,
+    _PNetworkType,
+    ("gateway", None, ht.TOr(ht.TNone, _CheckCIDRAddrNotation), None),
+    ("network6", None, ht.TOr(ht.TNone, _CheckCIDR6NetNotation), None),
+    ("gateway6", None, ht.TOr(ht.TNone, _CheckCIDR6AddrNotation), None),
+    ("mac_prefix", None, ht.TMaybeString, None),
+    ("add_reserved_ips", None,
+     ht.TOr(ht.TNone, ht.TListOf(_CheckCIDRAddrNotation)), None),
+    ("remove_reserved_ips", None,
+     ht.TOr(ht.TNone, ht.TListOf(_CheckCIDRAddrNotation)), None),
+    ]
+
+
+class OpNetworkConnect(OpCode):
+  """Connect a Network to a specific Nodegroup with the defined netparams
+     (mode, link). Nics in this Network will inherit those params.
+     Produce errors if a NIC (that its not already assigned to a network)
+     has an IP that is contained in the Network this will produce error unless
+     --no-conflicts-check is passed.
+
+  """
+  OP_DSC_FIELD = "network_name"
+  OP_PARAMS = [
+    _PGroupName,
+    _PNetworkName,
+    ("network_mode", None, ht.TString, None),
+    ("network_link", None, ht.TString, None),
+    ("conflicts_check", True, ht.TBool, "Check for conflicting IPs"),
+    ]
+
+
+class OpNetworkDisconnect(OpCode):
+  """Disconnect a Network from a Nodegroup. Produce errors if NICs are
+     present in the Network unless --no-conficts-check option is passed.
+
+  """
+  OP_DSC_FIELD = "network_name"
+  OP_PARAMS = [
+    _PGroupName,
+    _PNetworkName,
+    ("conflicts_check", True, ht.TBool, "Check for conflicting IPs"),
+    ]
+
+
+class OpNetworkQuery(OpCode):
+  """Compute the list of networks."""
+  OP_PARAMS = [
+    _POutputFields,
+    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
+     "Empty list to query all groups, group names otherwise"),
+    ]
+
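A client submitting one of these opcodes simply instantiates it with keyword parameters and lets Validate fill the defaults; a hedged sketch with illustrative values:

    op = OpNetworkAdd(network_name="net1", network="10.0.0.0/24",
                      gateway="10.0.0.1",
                      add_reserved_ips=["10.0.0.2", "10.0.0.3"])
    op.Validate(True)  # sets defaults (tags=[], ...) and type-checks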
+
 def _GetOpList():
   """Returns list of all defined opcodes.
 
index 6ce13e1..586298e 100644 (file)
@@ -511,7 +511,7 @@ class OVFReader(object):
                        (OVF_SCHEMA, OVF_SCHEMA))
     network_names = self._GetAttributes(networks_search,
       "{%s}name" % OVF_SCHEMA)
-    required = ["ip", "mac", "link", "mode"]
+    required = ["ip", "mac", "link", "mode", "network"]
     for (counter, network_name) in enumerate(network_names):
       network_search = ("{%s}VirtualSystem/{%s}VirtualHardwareSection/{%s}Item"
                         % (OVF_SCHEMA, OVF_SCHEMA, OVF_SCHEMA))
@@ -533,6 +533,8 @@ class OVFReader(object):
                                                          GANETI_SCHEMA)
         ganeti_data["link"] = network_ganeti_data.findtext("{%s}Link" %
                                                            GANETI_SCHEMA)
+        ganeti_data["network"] = network_ganeti_data.findtext("{%s}Network" %
+                                                              GANETI_SCHEMA)
       mac_data = None
       if network_data:
         mac_data = network_data.findtext("{%s}Address" % RASD_SCHEMA)
@@ -753,6 +755,7 @@ class OVFWriter(object):
       SubElementText(nic, "gnt:MACAddress", network["mac"])
       SubElementText(nic, "gnt:IPAddress", network["ip"])
       SubElementText(nic, "gnt:Link", network["link"])
+      SubElementText(nic, "gnt:Network", network["network"])
 
   def SaveVirtualSystemData(self, name, vcpus, memory):
     """Convert virtual system information to OVF sections.
@@ -1313,6 +1316,8 @@ class OVFImporter(Converter):
       results["nic%s_mac" % nic_id] = nic_desc.get("mac", constants.VALUE_AUTO)
       results["nic%s_link" % nic_id] = \
         nic_desc.get("link", constants.VALUE_AUTO)
+      results["nic%s_network" % nic_id] = \
+        nic_desc.get("network", constants.VALUE_AUTO)
       if nic_desc.get("mode") == "bridged":
         results["nic%s_ip" % nic_id] = constants.VALUE_NONE
       else:
@@ -1634,7 +1639,7 @@ class OVFExporter(Converter):
     counter = 0
     while True:
       data_link = \
-        self.config_parser.get(constants.INISECT_INS, "nic%s_link" % counter)
+        self.config_parser.get(constants.INISECT_INS, "nic%s_network" % counter)
       if data_link is None:
         break
       results.append({
@@ -1644,7 +1649,9 @@ class OVFExporter(Converter):
            "nic%s_mac" % counter),
         "ip": self.config_parser.get(constants.INISECT_INS,
            "nic%s_ip" % counter),
-        "link": data_link,
+        "link": self.config_parser.get(constants.INISECT_INS,
+           "nic%s_link" % counter),
+        "network": data_link,
       })
       if results[counter]["mode"] not in constants.NIC_VALID_MODES:
         raise errors.OpPrereqError("Network mode %s not recognized"
index a8f19f0..3b9d315 100644 (file)
@@ -70,6 +70,10 @@ from ganeti.constants import (QFT_UNKNOWN, QFT_TEXT, QFT_BOOL, QFT_NUMBER,
                               RS_NORMAL, RS_UNKNOWN, RS_NODATA,
                               RS_UNAVAIL, RS_OFFLINE)
 
+(NETQ_CONFIG,
+ NETQ_GROUP,
+ NETQ_STATS,
+ NETQ_INST) = range(300, 304)
 
 # Constants for requesting data from the caller/data provider. Each property
 # collected/computed separately by the data provider should have its own to
@@ -1514,6 +1518,20 @@ def _GetInstNic(index, cb):
   return fn
 
 
+def _GetInstNicNetwork(ctx, _, nic): # pylint: disable=W0613
+  """Get a NIC's Network.
+
+  @type ctx: L{InstanceQueryData}
+  @type nic: L{objects.NIC}
+  @param nic: NIC object
+
+  """
+  if nic.network is None:
+    return _FS_UNAVAIL
+  else:
+    return nic.network
+
+
 def _GetInstNicIp(ctx, _, nic): # pylint: disable=W0613
   """Get a NIC's IP address.
 
@@ -1625,6 +1643,9 @@ def _GetInstanceNetworkFields():
     (_MakeField("nic.bridges", "NIC_bridges", QFT_OTHER,
                 "List containing each network interface's bridge"),
      IQ_CONFIG, 0, _GetInstAllNicBridges),
+    (_MakeField("nic.networks", "NIC_networks", QFT_OTHER,
+                "List containing each interface's network"), IQ_CONFIG, 0,
+     lambda ctx, inst: [nic.network for nic in inst.nics]),
     ]
 
   # NICs by number
@@ -1646,6 +1667,9 @@ def _GetInstanceNetworkFields():
       (_MakeField("nic.bridge/%s" % i, "NicBridge/%s" % i, QFT_TEXT,
                   "Bridge of %s network interface" % numtext),
        IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicBridge)),
+      (_MakeField("nic.network/%s" % i, "NicNetwork/%s" % i, QFT_TEXT,
+                  "Network of %s network interface" % numtext),
+       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicNetwork)),
       ])
 
   aliases = [
@@ -1655,6 +1679,7 @@ def _GetInstanceNetworkFields():
     ("bridge", "nic.bridge/0"),
     ("nic_mode", "nic.mode/0"),
     ("nic_link", "nic.link/0"),
+    ("nic_network", "nic.network/0"),
     ]
 
   return (fields, aliases)
@@ -2180,6 +2205,36 @@ def _BuildOsFields():
   return _PrepareFieldList(fields, [])
 
 
+class ExtStorageInfo(objects.ConfigObject):
+  __slots__ = [
+    "name",
+    "node_status",
+    "nodegroup_status",
+    "parameters",
+    ]
+
+
+def _BuildExtStorageFields():
+  """Builds list of fields for extstorage provider queries.
+
+  """
+  fields = [
+    (_MakeField("name", "Name", QFT_TEXT, "ExtStorage provider name"),
+     None, 0, _GetItemAttr("name")),
+    (_MakeField("node_status", "NodeStatus", QFT_OTHER,
+                "Status from node"),
+     None, 0, _GetItemAttr("node_status")),
+    (_MakeField("nodegroup_status", "NodegroupStatus", QFT_OTHER,
+                "Overall Nodegroup status"),
+     None, 0, _GetItemAttr("nodegroup_status")),
+    (_MakeField("parameters", "Parameters", QFT_OTHER,
+                "ExtStorage provider parameters"),
+     None, 0, _GetItemAttr("parameters")),
+    ]
+
+  return _PrepareFieldList(fields, [])
+
+
 def _JobUnavailInner(fn, ctx, (job_id, job)): # pylint: disable=W0613
   """Return L{_FS_UNAVAIL} if job is None.
 
@@ -2401,6 +2456,137 @@ def _BuildClusterFields():
     ])
 
 
+class NetworkQueryData:
+  """Data container for network data queries.
+
+  """
+  def __init__(self, networks, network_to_groups,
+               network_to_instances, stats):
+    """Initializes this class.
+
+    @param networks: List of network objects
+    @type network_to_groups: dict; network UUID as key
+    @param network_to_groups: Per-network list of groups
+    @type network_to_instances: dict; network UUID as key
+    @param network_to_instances: Per-network list of instances
+    @type stats: dict; network UUID as key
+    @param stats: Per-network usage statistics
+
+    """
+    self.networks = networks
+    self.network_to_groups = network_to_groups
+    self.network_to_instances = network_to_instances
+    self.stats = stats
+
+  def __iter__(self):
+    """Iterate over all networks.
+
+    """
+    for net in self.networks:
+      if self.stats:
+        self.curstats = self.stats.get(net.uuid, None)
+      else:
+        self.curstats = None
+      yield net
+
+
+_NETWORK_SIMPLE_FIELDS = {
+  "name": ("Network", QFT_TEXT, 0, "The network name"),
+  "network": ("Subnet", QFT_TEXT, 0, "The IPv4 subnet"),
+  "gateway": ("Gateway", QFT_OTHER, 0, "The IPv4 gateway"),
+  "network6": ("IPv6Subnet", QFT_OTHER, 0, "The IPv6 subnet"),
+  "gateway6": ("IPv6Gateway", QFT_OTHER, 0, "The IPv6 gateway"),
+  "mac_prefix": ("MacPrefix", QFT_OTHER, 0, "The MAC address prefix"),
+  "network_type": ("NetworkType", QFT_OTHER, 0, "The network type"),
+  }
+
+
+_NETWORK_STATS_FIELDS = {
+  "free_count": ("FreeCount", QFT_NUMBER, 0, "How many addresses are free"),
+  "reserved_count": ("ReservedCount", QFT_NUMBER, 0,
+                     "How many addresses are reserved"),
+  "map": ("Map", QFT_TEXT, 0, "The actual mapping"),
+  "external_reservations": ("ExternalReservations", QFT_TEXT, 0,
+                            "The external reservations"),
+  }
+
+
+def _GetNetworkStatsField(field, kind, ctx, net): # pylint: disable=W0613
+  """Gets the value of a "stats" field from L{NetworkQueryData}.
+
+  @param field: Field name
+  @param kind: Data kind, one of L{constants.QFT_ALL}
+  @type ctx: L{NetworkQueryData}
+
+  """
+
+  try:
+    value = ctx.curstats[field]
+  except KeyError:
+    return _FS_UNAVAIL
+
+  if kind == QFT_TEXT:
+    return value
+
+  assert kind in (QFT_NUMBER, QFT_UNIT)
+
+  # Try to convert into number
+  try:
+    return int(value)
+  except (ValueError, TypeError):
+    logging.exception("Failed to convert network field '%s' (value %r) to int",
+                      value, field)
+    return _FS_UNAVAIL
+
+
+def _BuildNetworkFields():
+  """Builds list of fields for network queries.
+
+  """
+  fields = [
+    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
+     lambda ctx, inst: list(inst.GetTags())),
+    ]
+
+  # Add simple fields
+  fields.extend([
+    (_MakeField(name, title, kind, doc),
+     NETQ_CONFIG, 0, _GetItemAttr(name))
+     for (name, (title, kind, flags, doc)) in _NETWORK_SIMPLE_FIELDS.items()
+    ])
+
+  def _GetLength(getter):
+    return lambda ctx, network: len(getter(ctx)[network.uuid])
+
+  def _GetSortedList(getter):
+    return lambda ctx, network: utils.NiceSort(getter(ctx)[network.uuid])
+
+  network_to_groups = operator.attrgetter("network_to_groups")
+  network_to_instances = operator.attrgetter("network_to_instances")
+
+  # Add fields for node groups
+  fields.extend([
+    (_MakeField("group_cnt", "NodeGroups", QFT_NUMBER, "Number of nodegroups"),
+     NETQ_GROUP, 0, _GetLength(network_to_groups)),
+       (_MakeField("group_list", "GroupList", QFT_OTHER, "List of nodegroups"),
+     NETQ_GROUP, 0, _GetSortedList(network_to_groups)),
+    ])
+
+  # Add fields for instances
+  fields.extend([
+    (_MakeField("inst_cnt", "Instances", QFT_NUMBER, "Number of instances"),
+     NETQ_INST, 0, _GetLength(network_to_instances)),
+    (_MakeField("inst_list", "InstanceList", QFT_OTHER, "List of instances"),
+     NETQ_INST, 0, _GetSortedList(network_to_instances)),
+    ])
+
+  # Add fields for usage statistics
+  fields.extend([
+    (_MakeField(name, title, kind, doc), NETQ_STATS, 0,
+     compat.partial(_GetNetworkStatsField, name, kind))
+    for (name, (title, kind, flags, doc)) in _NETWORK_STATS_FIELDS.items()
+    ])
+
+  return _PrepareFieldList(fields, [])
+
 #: Fields for cluster information
 CLUSTER_FIELDS = _BuildClusterFields()
 
@@ -2419,12 +2605,18 @@ GROUP_FIELDS = _BuildGroupFields()
 #: Fields available for operating system queries
 OS_FIELDS = _BuildOsFields()
 
+#: Fields available for extstorage provider queries
+EXTSTORAGE_FIELDS = _BuildExtStorageFields()
+
 #: Fields available for job queries
 JOB_FIELDS = _BuildJobFields()
 
 #: Fields available for exports
 EXPORT_FIELDS = _BuildExportFields()
 
+#: Fields available for network queries
+NETWORK_FIELDS = _BuildNetworkFields()
+
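NETWORK_FIELDS plugs into the generic query machinery like the other resources; a rough in-process sketch (net is assumed to be an objects.Network with its uuid set):

    q = Query(NETWORK_FIELDS, ["name", "free_count"])
    ctx = NetworkQueryData([net], {}, {}, {net.uuid: {"free_count": 251}})
    rows = q.Query(ctx)  # roughly [[(RS_NORMAL, "net1"), (RS_NORMAL, 251)]]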
 #: All available resources
 ALL_FIELDS = {
   constants.QR_CLUSTER: CLUSTER_FIELDS,
@@ -2433,8 +2625,10 @@ ALL_FIELDS = {
   constants.QR_LOCK: LOCK_FIELDS,
   constants.QR_GROUP: GROUP_FIELDS,
   constants.QR_OS: OS_FIELDS,
+  constants.QR_EXTSTORAGE: EXTSTORAGE_FIELDS,
   constants.QR_JOB: JOB_FIELDS,
   constants.QR_EXPORT: EXPORT_FIELDS,
+  constants.QR_NETWORK: NETWORK_FIELDS,
   }
 
 #: All available field lists
index b5a4df5..1b79a42 100644 (file)
@@ -1625,6 +1625,124 @@ class GanetiRapiClient(object): # pylint: disable=R0904
                              ("/%s/nodes/%s/tags" %
                               (GANETI_RAPI_VERSION, node)), query, None)
 
+  def GetNetworks(self, bulk=False):
+    """Gets all networks in the cluster.
+
+    @type bulk: bool
+    @param bulk: whether to return all information about the networks
+
+    @rtype: list of dict or str
+    @return: if bulk is true, a list of dictionaries with info about all
+        networks in the cluster, else a list of names of those networks
+
+    """
+    query = []
+    _AppendIf(query, bulk, ("bulk", 1))
+
+    networks = self._SendRequest(HTTP_GET, "/%s/networks" % GANETI_RAPI_VERSION,
+                               query, None)
+    if bulk:
+      return networks
+    else:
+      return [n["name"] for n in networks]
+
+  def GetNetwork(self, network):
+    """Gets information about a network.
+
+    @type network: str
+    @param network: name of the network whose info to return
+
+    @rtype: dict
+    @return: info about the network
+
+    """
+    return self._SendRequest(HTTP_GET,
+                             "/%s/networks/%s" % (GANETI_RAPI_VERSION, network),
+                             None, None)
+
+  def CreateNetwork(self, network_name, network, gateway=None, network6=None,
+                    gateway6=None, mac_prefix=None, network_type=None,
+                    add_reserved_ips=None, tags=None, dry_run=False):
+    """Creates a new network.
+
+    @type network_name: str
+    @param network_name: the name of the network to create
+    @type dry_run: bool
+    @param dry_run: whether to perform a dry run
+
+    @rtype: string
+    @return: job id
+
+    """
+    query = []
+    _AppendDryRunIf(query, dry_run)
+
+    if add_reserved_ips:
+      add_reserved_ips = add_reserved_ips.split(",")
+
+    if tags:
+      tags = tags.split(",")
+
+    body = {
+      "network_name": network_name,
+      "gateway": gateway,
+      "network": network,
+      "gateway6": gateway6,
+      "network6": network6,
+      "mac_prefix": mac_prefix,
+      "network_type": network_type,
+      "add_reserved_ips": add_reserved_ips,
+      "tags": tags,
+      }
+
+    return self._SendRequest(HTTP_POST, "/%s/networks" % GANETI_RAPI_VERSION,
+                             query, body)
+
+  def ConnectNetwork(self, network_name, group_name, mode, link):
+    """Connects a Network to a NodeGroup with the given netparams
+
+    """
+    body = {
+      "group_name": group_name,
+      "network_mode": mode,
+      "network_link": link
+      }
+
+    return self._SendRequest(HTTP_PUT,
+                             ("/%s/networks/%s/connect" %
+                             (GANETI_RAPI_VERSION, network_name)), None, body)
+
+  def DisconnectNetwork(self, network_name, group_name):
+    """Connects a Network to a NodeGroup with the given netparams
+
+    """
+    body = {
+      "group_name": group_name
+      }
+    return self._SendRequest(HTTP_PUT,
+                             ("/%s/networks/%s/disconnect" %
+                             (GANETI_RAPI_VERSION, network_name)), None, body)
+
+  def DeleteNetwork(self, network, dry_run=False):
+    """Deletes a network.
+
+    @type group: str
+    @param group: the network to delete
+    @type dry_run: bool
+    @param dry_run: whether to peform a dry run
+
+    @rtype: string
+    @return: job id
+
+    """
+    query = []
+    _AppendDryRunIf(query, dry_run)
+
+    return self._SendRequest(HTTP_DELETE,
+                             ("/%s/networks/%s" %
+                              (GANETI_RAPI_VERSION, network)), query, None)
+
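Assuming a connected GanetiRapiClient, the new endpoints chain naturally; a sketch (host, credentials and netparams are illustrative, and each mutating call returns a job id a real client would poll):

    client = GanetiRapiClient("master.example.com",
                              username="user", password="secret")
    client.CreateNetwork("net1", "10.0.0.0/24", gateway="10.0.0.1")
    client.ConnectNetwork("net1", "default", "bridged", "br0")
    print client.GetNetwork("net1")["map"]
    client.DisconnectNetwork("net1", "default")
    client.DeleteNetwork("net1")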
   def GetGroups(self, bulk=False):
     """Gets all node groups in the cluster.
 
index b701d71..a833fcf 100644 (file)
@@ -89,7 +89,8 @@ class Mapper:
 
 
 def GetHandlers(node_name_pattern, instance_name_pattern,
-                group_name_pattern, job_id_pattern, disk_pattern,
+                group_name_pattern, network_name_pattern,
+                job_id_pattern, disk_pattern,
                 query_res_pattern):
   """Returns all supported resources and their handlers.
 
@@ -167,6 +168,14 @@ def GetHandlers(node_name_pattern, instance_name_pattern,
     re.compile(r"^/2/instances/(%s)/console$" % instance_name_pattern):
       rlib2.R_2_instances_name_console,
 
+    "/2/networks": rlib2.R_2_networks,
+    re.compile(r"^/2/networks/(%s)$" % network_name_pattern):
+      rlib2.R_2_networks_name,
+    re.compile(r"^/2/networks/(%s)/connect$" % network_name_pattern):
+      rlib2.R_2_networks_name_connect,
+    re.compile(r"^/2/networks/(%s)/disconnect$" % network_name_pattern):
+      rlib2.R_2_networks_name_disconnect,
+
     "/2/groups": rlib2.R_2_groups,
     re.compile(r"^/2/groups/(%s)$" % group_name_pattern):
       rlib2.R_2_groups_name,
@@ -197,6 +206,7 @@ def GetHandlers(node_name_pattern, instance_name_pattern,
     }
 
 
-CONNECTOR.update(GetHandlers(_NAME_PATTERN, _NAME_PATTERN, _NAME_PATTERN,
+CONNECTOR.update(GetHandlers(_NAME_PATTERN, _NAME_PATTERN,
+                             _NAME_PATTERN, _NAME_PATTERN,
                              constants.JOB_ID_TEMPLATE, _DISK_PATTERN,
                              _NAME_PATTERN))
index e434340..d425b95 100644 (file)
@@ -70,7 +70,8 @@ _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
 I_FIELDS = ["name", "admin_state", "os",
             "pnode", "snodes",
             "disk_template",
-            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
+            "nic.ips", "nic.macs", "nic.modes",
+            "nic.links", "nic.networks", "nic.bridges",
             "network_port",
             "disk.sizes", "disk_usage",
             "beparams", "hvparams",
@@ -90,6 +91,14 @@ N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "group.uuid",
             ] + _COMMON_FIELDS
 
+NET_FIELDS = ["name", "network", "gateway",
+              "network6", "gateway6",
+              "mac_prefix", "network_type",
+              "free_count", "reserved_count",
+              "map", "group_list", "inst_list",
+              "external_reservations", "tags",
+              ]
+
 G_FIELDS = [
   "alloc_policy",
   "name",
@@ -642,6 +651,101 @@ class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
       })
 
 
+class R_2_networks(baserlib.OpcodeResource):
+  """/2/networks resource.
+
+  """
+  GET_OPCODE = opcodes.OpNetworkQuery
+  POST_OPCODE = opcodes.OpNetworkAdd
+  POST_RENAME = {
+    "name": "network_name",
+    }
+
+  def GetPostOpInput(self):
+    """Create a network.
+
+    """
+    assert not self.items
+    return (self.request_body, {
+      "dry_run": self.dryRun(),
+      })
+
+  def GET(self):
+    """Returns a list of all networks.
+
+    """
+    client = self.GetClient()
+
+    if self.useBulk():
+      bulkdata = client.QueryNetworks([], NET_FIELDS, False)
+      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
+    else:
+      data = client.QueryNetworks([], ["name"], False)
+      networknames = [row[0] for row in data]
+      return baserlib.BuildUriList(networknames, "/2/networks/%s",
+                                   uri_fields=("name", "uri"))
+
+
+class R_2_networks_name(baserlib.OpcodeResource):
+  """/2/network/[network_name] resource.
+
+  """
+  DELETE_OPCODE = opcodes.OpNetworkRemove
+
+  def GET(self):
+    """Send information about a network.
+
+    """
+    network_name = self.items[0]
+    client = self.GetClient()
+
+    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
+                                            names=[network_name],
+                                            fields=NET_FIELDS,
+                                            use_locking=self.useLocking())
+
+    return baserlib.MapFields(NET_FIELDS, result[0])
+
+  def GetDeleteOpInput(self):
+    """Delete a network.
+
+    """
+    assert len(self.items) == 1
+    return (self.request_body, {
+      "network_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
+
+
+class R_2_networks_name_connect(baserlib.OpcodeResource):
+  """/2/network/[network_name]/connect.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkConnect
+
+  def GetPutOpInput(self):
+    """Changes some parameters of node group.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      })
+
+
+class R_2_networks_name_disconnect(baserlib.OpcodeResource):
+  """/2/network/[network_name]/disconnect.
+
+  """
+  PUT_OPCODE = opcodes.OpNetworkDisconnect
+
+  def GetPutOpInput(self):
+    """Changes some parameters of node group.
+
+    """
+    assert self.items
+    return (self.request_body, {
+      "network_name": self.items[0],
+      })
+
+
 class R_2_groups(baserlib.OpcodeResource):
   """/2/groups resource.
 
@@ -655,6 +759,7 @@ class R_2_groups(baserlib.OpcodeResource):
   def GetPostOpInput(self):
     """Create a node group.
 
     """
     assert not self.items
     return (self.request_body, {
index b06c3c7..aad6905 100644 (file)
@@ -35,6 +35,7 @@ import zlib
 import base64
 import pycurl
 import threading
+import copy
 
 from ganeti import utils
 from ganeti import objects
@@ -663,6 +664,7 @@ class RpcRunner(_RpcClientBase,
       rpc_defs.ED_INST_DICT: self._InstDict,
       rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp,
       rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp,
+      rpc_defs.ED_NIC_DICT: self._NicDict,
 
       # Encoders annotating disk parameters
       rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP,
@@ -688,6 +690,18 @@ class RpcRunner(_RpcClientBase,
     _generated_rpc.RpcClientDnsOnly.__init__(self)
     _generated_rpc.RpcClientDefault.__init__(self)
 
+  def _NicDict(self, nic):
+    """Convert the given nic to a dict and encapsulate netinfo
+
+    """
+    n = copy.deepcopy(nic)
+    if n.network:
+      net_uuid = self._cfg.LookupNetwork(n.network)
+      if net_uuid:
+        nobj = self._cfg.GetNetwork(net_uuid)
+        n.netinfo = objects.Network.ToDict(nobj)
+    return n.ToDict()
+
   def _InstDict(self, instance, hvp=None, bep=None, osp=None):
     """Convert the given instance to a dict.
 
@@ -722,6 +736,12 @@ class RpcRunner(_RpcClientBase,
       nic["nicparams"] = objects.FillDict(
         cluster.nicparams[constants.PP_DEFAULT],
         nic["nicparams"])
+      network = nic.get("network", None)
+      if network:
+        net_uuid = self._cfg.LookupNetwork(network)
+        if net_uuid:
+          nobj = self._cfg.GetNetwork(net_uuid)
+          nic["netinfo"] = objects.Network.ToDict(nobj)
     idict["disks"] = self._DisksDictDP((instance.disks, instance))
     return idict
 
index 233fce5..a85911a 100644 (file)
@@ -73,7 +73,8 @@ ACCEPT_OFFLINE_NODE = object()
  ED_COMPRESS,
  ED_BLOCKDEV_RENAME,
  ED_DISKS_DICT_DP,
- ED_SINGLE_DISK_DICT_DP) = range(1, 14)
+ ED_SINGLE_DISK_DICT_DP,
+ ED_NIC_DICT) = range(1, 15)
 
 
 def _Prepare(calls):
@@ -274,6 +275,27 @@ _INSTANCE_CALLS = [
     ("reinstall", None, None),
     ("debug", None, None),
     ], None, None, "Starts an instance"),
+  ("hot_add_nic", SINGLE, None, TMO_NORMAL, [
+    ("instance", ED_INST_DICT, "Instance object"),
+    ("nic", ED_NIC_DICT, "Nic dict to hotplug"),
+    ("seq", None, "Nic seq to hotplug"),
+    ], None, None, "Adds a nic to a running instance"),
+  ("hot_del_nic", SINGLE, None, TMO_NORMAL, [
+    ("instance", ED_INST_DICT, "Instance object"),
+    ("nic", ED_NIC_DICT, "nic dict to remove"),
+    ("seq", None, "Nic seq to hotplug"),
+    ], None, None, "Removes a nic to a running instance"),
+  ("hot_add_disk", SINGLE, None, TMO_NORMAL, [
+    ("instance", ED_INST_DICT, "Instance object"),
+    ("disk", ED_OBJECT_DICT, "Disk dict to hotplug"),
+    ("dev_path", None, "Device path"),
+    ("seq", None, "Disk seq to hotplug"),
+    ], None, None, "Adds a nic to a running instance"),
+  ("hot_del_disk", SINGLE, None, TMO_NORMAL, [
+    ("instance", ED_INST_DICT, "Instance object"),
+    ("disk", ED_OBJECT_DICT, "Disk dict to remove"),
+    ("seq", None, "Disk seq to hotplug"),
+    ], None, None, "Removes a nic to a running instance"),
   ]
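On the master these definitions expand into generated client methods (call_<name>); a hedged sketch of a single-node invocation, assuming rpc_runner is an RpcRunner bound to the cluster configuration:

    result = rpc_runner.call_hot_add_nic(node_name, instance, nic, seq)
    result.Raise("Could not hotplug NIC to instance %s" % instance.name)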
 
 _IMPEXP_CALLS = [
@@ -435,6 +457,11 @@ _OS_CALLS = [
     ], None, _OsGetPostProc, "Returns an OS definition"),
   ]
 
+_EXTSTORAGE_CALLS = [
+  ("extstorage_diagnose", MULTI, None, TMO_FAST, [], None, None,
+   "Request a diagnose of ExtStorage Providers"),
+  ]
+
 _NODE_CALLS = [
   ("node_has_ip_address", SINGLE, None, TMO_FAST, [
     ("address", None, "IP address"),
@@ -503,7 +530,7 @@ CALLS = {
   "RpcClientDefault": \
     _Prepare(_IMPEXP_CALLS + _X509_CALLS + _OS_CALLS + _NODE_CALLS +
              _FILE_STORAGE_CALLS + _MISC_CALLS + _INSTANCE_CALLS +
-             _BLOCKDEV_CALLS + _STORAGE_CALLS),
+             _BLOCKDEV_CALLS + _STORAGE_CALLS + _EXTSTORAGE_CALLS),
   "RpcClientJobQueue": _Prepare([
     ("jobqueue_update", MULTI, None, TMO_URGENT, [
       ("file_name", None, None),
index 71ddb90..aacacc0 100644 (file)
@@ -372,6 +372,15 @@ class ClientOps:
       op = opcodes.OpGroupQuery(names=names, output_fields=fields)
       return self._Query(op)
 
+    elif method == luxi.REQ_QUERY_NETWORKS:
+      (names, fields, use_locking) = args
+      logging.info("Received network query request for %s", names)
+      if use_locking:
+        raise errors.OpPrereqError("Sync queries are not allowed",
+                                   errors.ECODE_INVAL)
+      op = opcodes.OpNetworkQuery(names=names, output_fields=fields)
+      return self._Query(op)
+
     elif method == luxi.REQ_QUERY_EXPORTS:
       (nodes, use_locking) = args
       if use_locking:
@@ -462,7 +471,8 @@ class GanetiContext(object):
     self.glm = locking.GanetiLockManager(
                 self.cfg.GetNodeList(),
                 self.cfg.GetNodeGroupList(),
-                self.cfg.GetInstanceList())
+                self.cfg.GetInstanceList(),
+                self.cfg.GetNetworkList())
 
     self.cfg.SetContext(self)
 
index d95680a..cc807a5 100644 (file)
@@ -559,6 +559,50 @@ class NodeRequestHandler(http.server.HttpServerHandler):
     return backend.StartInstance(instance, startup_paused)
 
   @staticmethod
+  def perspective_hot_add_disk(params):
+    """Hotplugs a nic to a running instance.
+
+    """
+    (idict, ddict, dev_path, seq) = params
+    logging.info("%s %s", idict, ddict)
+    instance = objects.Instance.FromDict(idict)
+    disk = objects.Disk.FromDict(ddict)
+    return backend.HotAddDisk(instance, disk, dev_path, seq)
+
+  @staticmethod
+  def perspective_hot_del_disk(params):
+    """Hotplugs a nic to a running instance.
+
+    """
+    (idict, ddict, seq) = params
+    logging.info("%s %s", idict, ddict)
+    instance = objects.Instance.FromDict(idict)
+    disk = objects.Disk.FromDict(ddict)
+    return backend.HotDelDisk(instance, disk, seq)
+
+  @staticmethod
+  def perspective_hot_add_nic(params):
+    """Hotplugs a nic to a running instance.
+
+    """
+    (idict, ndict, seq) = params
+    logging.info("%s %s", idict, ndict)
+    instance = objects.Instance.FromDict(idict)
+    nic = objects.NIC.FromDict(ndict)
+    return backend.HotAddNic(instance, nic, seq)
+
+  @staticmethod
+  def perspective_hot_del_nic(params):
+    """Hotplugs a nic to a running instance.
+
+    """
+    (idict, ndict, seq) = params
+    logging.info("%s %s", idict, ndict)
+    instance = objects.Instance.FromDict(idict)
+    nic = objects.NIC.FromDict(ndict)
+    return backend.HotDelNic(instance, nic, seq)
+
+  @staticmethod
   def perspective_migration_info(params):
     """Gather information about an instance to be migrated.
 
@@ -830,6 +874,15 @@ class NodeRequestHandler(http.server.HttpServerHandler):
     required, name, checks, params = params
     return backend.ValidateOS(required, name, checks, params)
 
+  # extstorage -----------------------
+
+  @staticmethod
+  def perspective_extstorage_diagnose(params):
+    """Query detailed information about existing extstorage providers.
+
+    """
+    return backend.DiagnoseExtStorage()
+
   # hooks -----------------------
 
   @staticmethod
index 2399d81..267f4a8 100644 (file)
@@ -296,6 +296,7 @@ class SimpleStore(object):
     constants.SS_MAINTAIN_NODE_HEALTH,
     constants.SS_UID_POOL,
     constants.SS_NODEGROUPS,
+    constants.SS_NETWORKS,
     )
   _MAX_SIZE = 131072
 
@@ -460,6 +461,14 @@ class SimpleStore(object):
     nl = data.splitlines(False)
     return nl
 
+  def GetNetworkList(self):
+    """Return the list of networks.
+
+    """
+    data = self._ReadFile(constants.SS_NETWORKS)
+    nl = data.splitlines(False)
+    return nl
+
   def GetClusterTags(self):
     """Return the cluster tags.
 
index a774f05..00416e6 100644 (file)
@@ -9,14 +9,15 @@ SEE ALSO
 --------
 
 Ganeti overview and specifications: **ganeti**(7) (general overview),
-**ganeti-os-interface**(7) (guest OS definitions).
+**ganeti-os-interface**(7) (guest OS definitions),
+**ganeti-extstorage-interface**(7) (external storage providers).
 
 Ganeti commands: **gnt-cluster**(8) (cluster-wide commands),
 **gnt-job**(8) (job-related commands), **gnt-node**(8) (node-related
-commands), **gnt-instance**(8) (instance commands), **gnt-os**(8)
-(guest OS commands), **gnt-group**(8) (node group commands),
-**gnt-backup**(8) (instance import/export commands), **gnt-debug**(8)
-(debug commands).
+commands), **gnt-instance**(8) (instance commands), **gnt-os**(8) (guest
+OS commands), **gnt-storage**(8) (storage commands), **gnt-group**(8)
+(node group commands), **gnt-backup**(8) (instance import/export
+commands), **gnt-debug**(8) (debug commands).
 
 Ganeti daemons: **ganeti-watcher**(8) (automatic instance restarter),
 **ganeti-cleaner**(8) (job queue cleaner), **ganeti-noded**(8) (node
diff --git a/man/ganeti-extstorage-interface.rst b/man/ganeti-extstorage-interface.rst
new file mode 100644 (file)
index 0000000..7266ee4
--- /dev/null
@@ -0,0 +1,212 @@
+ganeti-extstorage-interface(7) Ganeti | Version @GANETI_VERSION@
+================================================================
+
+Name
+----
+
+ganeti-extstorage-interface - Specifications for ExtStorage providers
+
+DESCRIPTION
+-----------
+
+The method for supporting external shared storage in Ganeti is to have
+an ExtStorage provider for each external shared storage hardware type.
+The ExtStorage provider is a set of files (executable scripts and text
+files), contained inside a directory which is named after the provider.
+This directory must be present on all nodes of a nodegroup (Ganeti
+doesn't replicate it) in order for the provider to be usable by Ganeti
+for this nodegroup (i.e. valid). The external shared storage hardware
+should also be accessible from all nodes of this nodegroup.
+
+REFERENCE
+---------
+
+There are seven required files: *create*, *attach*, *detach*, *remove*,
+*grow*, *verify* (executables) and *parameters.list* (text file).
+
+Common environment
+~~~~~~~~~~~~~~~~~~
+
+All commands will get their input via environment variables. A common
+set of variables will be exported for all commands, and some of them
+might have extra ones. Note that all counts are zero-based.
+
+Since Ganeti version 2.5, the environment will be cleaned up before
+being passed to scripts; therefore, they will not inherit the
+environment with which the ganeti node daemon was started. If you depend
+on any (non-Ganeti) environment variables, you will need to define or
+source them appropriately.
+
+VOL_NAME
+    The name of the volume. This name is unique inside the Ganeti
+    context, and Ganeti uses it to refer to a specific volume inside
+    the external storage. Its format is ``UUID.ext.diskX``, where
+    ``UUID`` is produced by Ganeti and is unique inside the Ganeti
+    context, and ``X`` is the disk's index (zero-based).
+
+VOL_SIZE
+    The volume's size in mebibytes.
+
+VOL_NEW_SIZE
+    Available only to the **grow** script. It declares the new size of
+    the volume after grow (in mebibytes). To find the amount to grow
+    by, the script should calculate ``VOL_NEW_SIZE - VOL_SIZE``.
+
+EXTP_*name*
+    Each ExtStorage parameter (see below) will be exported in its own
+    variable, prefixed with ``EXTP_``, and upper-cased. For example, a
+    ``fromsnap`` parameter will be exported as ``EXTP_FROMSNAP``.
+
+EXECUTABLE SCRIPTS
+------------------
+
+
+create
+~~~~~~
+
+The **create** command is used for creating a new volume inside the
+external storage. The ``VOL_NAME`` variable denotes the volume's name,
+which should be unique. After creation, Ganeti will refer to this
+volume by this name for all other actions.
+
+Ganeti produces this name dynamically and ensures its uniqueness inside
+the Ganeti context. Therefore, you should make sure not to manually
+provision additional volumes with this type of name inside the external
+storage, because this will lead to conflicts and possible loss of data.
+
+The ``VOL_SIZE`` variable denotes the size of the new volume to be
+created in mebibytes.
+
+If the script ends successfully, a new volume of size ``VOL_SIZE``
+should exist inside the external storage, e.g. a LUN inside a NAS
+appliance.
+
+The script returns ``0`` on success.
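+
+As an illustration only, a minimal **create** script might look like
+the following sketch. It assumes a hypothetical shared LVM volume group
+named ``ext_vg``; real providers will differ::
+
+    #!/usr/bin/env python
+    # Sketch of an ExtStorage "create" script (illustrative only).
+    # Creates a volume of VOL_SIZE mebibytes named VOL_NAME in the
+    # hypothetical shared volume group "ext_vg".
+    import os
+    import subprocess
+    import sys
+
+    vol_name = os.environ["VOL_NAME"]
+    vol_size = os.environ["VOL_SIZE"]
+
+    # Propagate lvcreate's exit code; 0 means success.
+    sys.exit(subprocess.call(["lvcreate", "-L", "%sM" % vol_size,
+                              "-n", vol_name, "ext_vg"]))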
+
+attach
+~~~~~~
+
+This command is used in order to make an already created volume visible
+to the physical node which will host the instance. This is done by
+mapping the already provisioned volume to a block device inside the host
+node.
+
+The ``VOL_NAME`` variable denotes the volume to be mapped.
+
+After successful attachment the script prints to its stdout a string,
+which is the full path of the block device to which the volume is
+mapped, e.g. ``/dev/dummy1``.
+
+When attach returns, this path should be a valid block device on the
+host node.
+
+The attach script should be idempotent: if the requested volume is
+already mapped, the script should just print to its stdout the path to
+which it is already mapped.
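+
+Continuing the illustrative sketch above (again assuming the
+hypothetical ``ext_vg`` volume group), an idempotent **attach** script
+might look like this::
+
+    #!/usr/bin/env python
+    # Sketch of an ExtStorage "attach" script (illustrative only).
+    # Volumes in an active LVM volume group are already mapped, so
+    # just verify that the device exists and print its path.
+    import os
+    import sys
+
+    dev_path = "/dev/ext_vg/%s" % os.environ["VOL_NAME"]
+
+    if not os.path.exists(dev_path):
+        sys.stderr.write("volume not found: %s\n" % dev_path)
+        sys.exit(1)
+
+    print(dev_path)
+    sys.exit(0)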
+
+detach
+~~~~~~
+
+This command is used in order to unmap an already mapped volume from the
+host node. Detach undoes everything attach did. This is done by
+unmapping the requested volume from the block device it is mapped to.
+
+The ``VOL_NAME`` variable denotes the volume to be unmapped.
+
+``detach`` doesn't affect the volume itself. It just unmaps it from the
+host node. The volume continues to exist inside the external storage.
+It's just not accessible by the node anymore. This script doesn't return
+anything to its stdout.
+
+The detach script should be idempotent: if the volume is already
+unmapped, the script shouldn't perform any action at all.
+
+The script returns ``0`` on success.
+
+remove
+~~~~~~
+
+This command is used to remove an existing volume from the external
+storage. The volume is permanently removed from inside the external
+storage along with all its data.
+
+The ``VOL_NAME`` variable denotes the volume to be removed.
+
+The script returns ``0`` on success.
+
+grow
+~~~~
+
+This command is used to grow an existing volume of the external storage.
+
+The ``VOL_NAME`` variable denotes the volume to grow.
+
+The ``VOL_SIZE`` variable denotes the current volume's size (in
+mebibytes). The ``VOL_NEW_SIZE`` variable denotes the final size after
+the volume has been grown (in mebibytes).
+
+The amount to grow by can be easily calculated by the script::
+
+    grow_amount = VOL_NEW_SIZE - VOL_SIZE (in mebibytes)
+
+Ganeti ensures that ``VOL_NEW_SIZE`` > ``VOL_SIZE``.
+
+If the script returns successfully, then the volume inside the external
+storage will have a new size of ``VOL_NEW_SIZE``. This isn't immediately
+reflected in the instance's disk. See ``gnt-instance grow`` for more
+details on when the running instance becomes aware of its grown disk.
+
+The script returns ``0`` on success.
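+
+As an illustration only, and continuing the hypothetical ``ext_vg``
+sketches above, a minimal **grow** script might be::
+
+    #!/usr/bin/env python
+    # Sketch of an ExtStorage "grow" script (illustrative only).
+    # Resizes the volume to VOL_NEW_SIZE mebibytes; lvextend exits
+    # non-zero if the resize is not possible.
+    import os
+    import subprocess
+    import sys
+
+    dev_path = "/dev/ext_vg/%s" % os.environ["VOL_NAME"]
+    new_size = os.environ["VOL_NEW_SIZE"]
+
+    sys.exit(subprocess.call(["lvextend", "-L", "%sM" % new_size,
+                              dev_path]))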
+
+verify
+~~~~~~
+
+The *verify* script is used to verify the consistency of the ExtStorage
+parameters (ext-params; see below). The command should take one or more
+arguments denoting what checks should be performed, and return a proper
+exit code depending on whether the validation failed or succeeded.
+
+Currently, the script is not invoked by Ganeti, but should be present
+for future use and consistency with the **ganeti-os-interface**(7)
+verify script.
+
+The script should return ``0`` on success.
+
+TEXT FILES
+----------
+
+
+parameters.list
+~~~~~~~~~~~~~~~
+
+This file declares the parameters supported by the ExtStorage provider,
+one parameter per line, with name and description (space and/or tab
+separated). For example::
+
+    fromsnap Snapshot name to create the volume from
+    nas_ip The IP of the NAS appliance
+
+The parameters can then be used during instance add as follows::
+
+    # gnt-instance add --disk=0:fromsnap="file_name",nas_ip="1.2.3.4" ...
+
+NOTES
+-----
+
+Backwards compatibility
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The ExtStorage interface was introduced in Ganeti 2.6 and is supported
+by Ganeti 2.6 and up.
+
+Common behaviour
+~~~~~~~~~~~~~~~~
+
+All the scripts should display a usage message when called with a wrong
+number of arguments or when the first argument is ``-h`` or ``--help``.
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
diff --git a/man/gnt-network.rst b/man/gnt-network.rst
new file mode 100644 (file)
index 0000000..9264044
--- /dev/null
@@ -0,0 +1,181 @@
+gnt-network(8) Ganeti | Version @GANETI_VERSION@
+================================================
+
+Name
+----
+
+gnt-network - Ganeti network administration
+
+Synopsis
+--------
+
+**gnt-network** {command} [arguments...]
+
+DESCRIPTION
+-----------
+
+The **gnt-network** command is used for network definition administration
+in the Ganeti system.
+
+COMMANDS
+--------
+
+ADD
+~~~
+
+| **add**
+| [--network=*NETWORK*]
+| [--gateway=*GATEWAY*]
+| [--add-reserved-ips=*RESERVEDIPS*]
+| [--network6=*NETWORK6*]
+| [--gateway6=*GATEWAY6*]
+| [--mac-prefix=*MACPREFIX*]
+| [--network-type=*NETWORKTYPE*]
+| {*network*}
+
+Creates a new network with the given name. The network will be unused
+initially. To connect it to a node group, use ``gnt-network connect``.
+The ``--network`` option is mandatory; all others are optional.
+
+The ``--network`` option allows you to specify the network in CIDR notation.
+
+The ``--gateway`` option allows you to specify the default gateway for this
+network.
+
+The ``--network-type`` option can be ``none``, ``private`` or ``public``.
+
+IPv6 semantics can be assigned to the network via the ``--network6`` and
+``--gateway6`` options. The IP pool is meaningless for IPv6, so those
+two values can be used for EUI64 generation from a NIC's MAC address.
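+
+For example (all names and addresses below are illustrative only)::
+
+    # gnt-network add --network=10.0.0.0/24 --gateway=10.0.0.1 \
+        --network-type=private example-net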
+
+MODIFY
+~~~~~~
+
+| **modify**
+| [--gateway=*GATEWAY*]
+| [--add-reserved-ips=*RESERVEDIPS*]
+| [--remove-reserved-ips=*RESERVEDIPS*]
+| [--network6=*NETWORK6*]
+| [--gateway6=*GATEWAY6*]
+| [--mac-prefix=*MACPREFIX*]
+| [--network-type=*NETWORKTYPE*]
+| {*network*}
+
+Modifies parameters of the network.
+
+The network's IP range (``--network``) cannot be modified; create a new
+network if you want to do so. All other options are documented in the
+**add** command above.
+
+REMOVE
+~~~~~~
+
+| **remove** {*network*}
+
+Deletes the indicated network, which must not be connected to any node group.
+
+LIST
+~~~~
+
+| **list** [--no-headers] [--separator=*SEPARATOR*] [-v]
+| [-o *[+]FIELD,...*] [network...]
+
+Lists all existing networks in the cluster.
+
+The ``--no-headers`` option will skip the initial header line. The
+``--separator`` option takes an argument which denotes what will be
+used between the output fields. Both these options are to help
+scripting.
+
+The ``-v`` option activates verbose mode, which changes the display of
+special field states (see **ganeti**(7)).
+
+The ``-o`` option takes a comma-separated list of output fields.
+If the value of the option starts with the character ``+``, the new
+fields will be added to the default list. This allows one to quickly
+see the default list plus a few other fields, instead of retyping
+the entire list of fields.
+
+The available fields and their meaning are:
+
+name
+    the network name
+
+group_count
+    the number of nodegroups connected to the network
+
+group_list
+    the list of nodegroups connected to the network
+
+inst_cnt
+    the number of instances using the network
+
+inst_list
+    the list of instances that have at least one NIC assigned to the
+    network
+
+external_reservations
+    the IPs that cannot be assigned to an instance
+
+free_count
+    how many IPs are left in the pool
+
+gateway
+    the network's gateway
+
+map
+    a nice text depiction of the available/reserved IPs in the network
+
+reserved_count
+    how many IPs have been reserved so far in the network
+
+network6
+    the IPv6 prefix of the network
+
+gateway6
+    the IPv6 gateway of the network
+
+mac_prefix
+    the MAC prefix of the network (if a NIC is assigned to the network,
+    it gets the network's MAC prefix)
+
+network_type
+    the type of the network (none, private or public)
+
+If no network names are given, then all networks are included.
+Otherwise, only the named networks will be listed.
+
+LIST-FIELDS
+~~~~~~~~~~~
+
+**list-fields** [field...]
+
+List available fields for networks.
+
+RENAME
+~~~~~~
+
+| **rename** {*oldname*} {*newname*}
+
+Renames a given network from *oldname* to *newname*. Not implemented yet.
+
+INFO
+~~~~
+
+| **info** [network...]
+
+Displays information about the given network(s).
+
+CONNECT
+~~~~~~~
+
+| **connect** {*network*} {*group*} {*mode*} {*link*}
+
+Connects a network to a given nodegroup with the given network
+parameters (*mode*, *link*). Every NIC assigned to the network will
+inherit those parameters. *group* can be ``all`` if you want to connect
+the network to all existing nodegroups.
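+
+For example, assuming an illustrative network named ``example-net`` and
+a node group named ``group1``, using bridged mode over the link
+``br0``::
+
+    # gnt-network connect example-net group1 bridged br0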
+
+DISCONNECT
+~~~~~~~~~~
+
+| **disconnect** {*network*} {*group*}
+
+Disconnects a network from a nodegroup. This is possible only if no
+instance is using the network.
diff --git a/man/gnt-storage.rst b/man/gnt-storage.rst
new file mode 100644 (file)
index 0000000..9fb2325
--- /dev/null
@@ -0,0 +1,63 @@
+gnt-storage(8) Ganeti | Version @GANETI_VERSION@
+================================================
+
+Name
+----
+
+gnt-storage - Ganeti storage administration
+
+Synopsis
+--------
+
+**gnt-storage** {command} [arguments...]
+
+DESCRIPTION
+-----------
+
+The **gnt-storage** command is used for managing the available storage
+inside the Ganeti cluster. At the moment, it manages only external
+storage (ExtStorage).
+
+COMMANDS
+--------
+
+
+**diagnose**
+
+This command provides detailed information about the state of all
+ExtStorage providers available in the Ganeti cluster. The state of each
+provider is calculated per nodegroup. This means that a provider may be
+valid (meaning usable) for some nodegroups, and invalid (not usable) for
+some others. This command will help you see why an installed ExtStorage
+provider is not valid for a specific nodegroup. It could be that it is
+missing from a node, or is only partially installed. This command will
+show the details of all ExtStorage providers and the reasons they are or
+aren't valid for every nodegroup in the cluster.
+
+**info**
+
+This command will list detailed information about each ExtStorage
+provider found in the cluster, including its nodegroup validity, the
+supported parameters (if any) and their documentation, etc.
+
+For each ExtStorage provider only the valid nodegroups will be listed.
+
+If run with no arguments, it will display info for all ExtStorage
+providers found in the cluster. If given ExtStorage provider names as
+arguments, it will list info only for the given providers.
+
+NOTES
+-----
+
+In the future, **gnt-storage** can be extended to also handle internal
+storage (such as lvm or drbd) and provide diagnostics for it as well.
+
+It can also be extended to handle internal and external storage pools,
+if/when this kind of abstraction is implemented in Ganeti.
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
index bf93c72..fe10665 100755 (executable)
@@ -462,6 +462,7 @@ class Burner(object):
                                 constants.DT_PLAIN,
                                 constants.DT_DRBD8,
                                 constants.DT_RBD,
+                                constants.DT_EXT,
                                 )
     if options.disk_template not in supported_disk_templates:
       Err("Unknown disk template '%s'" % options.disk_template)
index 81dce1d..c7569ac 100755 (executable)
@@ -94,6 +94,32 @@ def CheckHostname(path):
                   " the master node", ssconf_master_node, hostname)
   return False
 
+
+def UpgradeNetworks(config_data):
+  networks = config_data.get("networks", None)
+  if not networks:
+    config_data["networks"] = {}
+
+
+def UpgradeGroups(config_data):
+  for group in config_data["nodegroups"].values():
+    networks = group.get("networks", None)
+    if not networks:
+      group["networks"] = {}
+
+
+def UpgradeInstances(config_data):
+  for instance in config_data["instances"].values():
+    print("Updating instance %s" % instance["name"])
+    # The obsolete "hotplugs" slot may or may not be present; drop it
+    instance.pop("hotplugs", None)
+    for nic in instance["nics"]:
+      # Same for the obsolete per-NIC "idx" slot
+      if "idx" in nic:
+        print("Updating instance NIC with idx %d" % nic["idx"])
+        del nic["idx"]
+
 
 def main():
   """Main program.
@@ -237,6 +263,10 @@ def main():
     if not options.dry_run:
       utils.RemoveFile(options.WATCHER_STATEFILE)
 
+  UpgradeNetworks(config_data)
+  UpgradeGroups(config_data)
+  UpgradeInstances(config_data)
+
   try:
     logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
     utils.WriteFile(file_name=options.CONFIG_DATA_PATH,