Merge branch 'stable-2.8' into master
author Bernardo Dal Seno <bdalseno@google.com>
Mon, 29 Apr 2013 16:48:52 +0000 (18:48 +0200)
committer Bernardo Dal Seno <bdalseno@google.com>
Mon, 29 Apr 2013 20:13:55 +0000 (22:13 +0200)
* stable-2.8: (42 commits)
  Add shelltests for hspace allocation
  hspace: Handle multiple ipolicy specs
  QA: Test multiple instance specs
  QA: Handle multiple instance specs
  Unit test for cli.FormatPolicyInfo()
  Add command-line support for multiple specs in ipolicy
  Add multiple min/max specs in instance policy
  Separate checks for std spec compliance
  QA: Transpose instance specs
  Improve check for "unreleased" versions in NEWS
  Update documentation for text format
  Add missing fields in htools text-backend documentation
  cfgupgrade: Remove enabled_disk_templates on downgrade
  Reason trail implementation for "start"
  Reason trail implementation for "shutdown"
  QA: More tests for instance policies in groups
  QA: Split function to set and parse instance policies
  QA: Update tests for new ipolicy specs command-line options
  Add unit tests for cfgupgrade with a real configuration
  Split functions in cfgupgrade unit tests
  ...

Conflicts:
        lib/backend.py
        lib/bdev.py
        man/gnt-cluster.rst

lib/bdev.py was renamed to lib/block/bdev.py in master, so I've manually
applied the bc3427b7 commit to the new file. The other two are
straightforward conflicts.

Signed-off-by: Bernardo Dal Seno <bdalseno@google.com>
Reviewed-by: Guido Trotter <ultrotter@google.com>

71 files changed:
Makefile.am
NEWS
autotools/check-news
configure.ac
doc/rapi.rst
lib/backend.py
lib/block/bdev.py
lib/bootstrap.py
lib/cli.py
lib/client/gnt_cluster.py
lib/client/gnt_group.py
lib/cmdlib.py
lib/config.py
lib/constants.py
lib/errors.py
lib/hooksmaster.py [new file with mode: 0644]
lib/mcpu.py
lib/objects.py
lib/rapi/client.py
lib/rpc_defs.py
lib/server/noded.py
man/gnt-cluster.rst
man/gnt-group.rst
man/htools.rst
qa/ganeti-qa.py
qa/qa_cluster.py
qa/qa_group.py
qa/qa_instance.py
qa/qa_utils.py
src/Ganeti/HTools/Backend/Text.hs
src/Ganeti/HTools/Cluster.hs
src/Ganeti/HTools/Instance.hs
src/Ganeti/HTools/Program/Hspace.hs
src/Ganeti/HTools/Types.hs
src/Ganeti/Objects.hs
test/data/cluster_config_2.7.json [new file with mode: 0644]
test/data/cluster_config_downgraded_2.7.json [new file with mode: 0644]
test/data/htools/clean-nonzero-score.data
test/data/htools/common-suffix.data
test/data/htools/empty-cluster.data
test/data/htools/hail-alloc-drbd.json
test/data/htools/hail-alloc-invalid-twodisks.json
test/data/htools/hail-alloc-twodisks.json
test/data/htools/hail-change-group.json
test/data/htools/hail-node-evac.json
test/data/htools/hail-reloc-drbd.json
test/data/htools/hbal-split-insts.data
test/data/htools/hspace-tiered-dualspec.data [new file with mode: 0644]
test/data/htools/hspace-tiered.data [new file with mode: 0644]
test/data/htools/invalid-node.data
test/data/htools/missing-resources.data
test/data/htools/multiple-master.data
test/data/htools/n1-failure.data
test/data/htools/rapi/groups.json
test/data/htools/rapi/info.json
test/data/htools/unique-reboot-order.data
test/hs/Test/Ganeti/HTools/Backend/Text.hs
test/hs/Test/Ganeti/HTools/Types.hs
test/hs/Test/Ganeti/Objects.hs
test/hs/Test/Ganeti/TestHTools.hs
test/hs/shelltests/htools-hspace.test
test/py/cfgupgrade_unittest.py
test/py/ganeti.cli_unittest.py
test/py/ganeti.cmdlib_unittest.py
test/py/ganeti.config_unittest.py
test/py/ganeti.hooks_unittest.py
test/py/ganeti.objects_unittest.py
test/py/ganeti.rapi.client_unittest.py
test/py/ganeti.rapi.rlib2_unittest.py
tools/cfgupgrade
tools/users-setup.in

diff --git a/Makefile.am b/Makefile.am
index 25b31cd..4fd1b3f 100644
@@ -267,6 +267,7 @@ pkgpython_PYTHON = \
        lib/constants.py \
        lib/daemon.py \
        lib/errors.py \
+       lib/hooksmaster.py \
        lib/ht.py \
        lib/jqueue.py \
        lib/jstore.py \
@@ -1012,6 +1013,8 @@ TEST_FILES = \
        test/data/htools/hail-reloc-drbd.json \
        test/data/htools/hbal-excl-tags.data \
        test/data/htools/hbal-split-insts.data \
+       test/data/htools/hspace-tiered-dualspec.data \
+       test/data/htools/hspace-tiered.data \
        test/data/htools/invalid-node.data \
        test/data/htools/missing-resources.data \
        test/data/htools/multiple-master.data \
@@ -1055,6 +1058,8 @@ TEST_FILES = \
        test/data/bdev-rbd/output_invalid.txt \
        test/data/cert1.pem \
        test/data/cert2.pem \
+       test/data/cluster_config_2.7.json \
+       test/data/cluster_config_downgraded_2.7.json \
        test/data/instance-minor-pairing.txt \
        test/data/ip-addr-show-dummy0.txt \
        test/data/ip-addr-show-lo-ipv4.txt \
@@ -1691,7 +1696,7 @@ check-local: check-dirs $(GENERATED_FILES)
        $(CHECK_PYTHON_CODE) $(check_python_code)
        PYTHONPATH=. $(CHECK_HEADER) $(check_python_code)
        $(CHECK_VERSION) $(VERSION) $(top_srcdir)/NEWS
-       $(CHECK_NEWS) < $(top_srcdir)/NEWS
+       RELEASE=$(PACKAGE_VERSION) $(CHECK_NEWS) < $(top_srcdir)/NEWS
        PYTHONPATH=. $(RUN_IN_TEMPDIR) $(CURDIR)/$(CHECK_IMPORTS) . $(standalone_python_modules)
        @expver=$(VERSION_MAJOR).$(VERSION_MINOR); \
        error= ; \
@@ -1832,18 +1837,12 @@ distcheck-hook:
          echo "Found empty files or directories in final archive." 1>&2; \
          exit 1; \
        fi
-       if test -n "$(BUILD_RELEASE)" && \
-          grep -n -H -E '^\*.*unreleased' $(top_distdir)/NEWS; then \
-          echo "Found unreleased version in NEWS." >&2; \
-          exit 1; \
-       fi
        if test -e $(top_distdir)/doc/man-html; then \
          echo "Found documentation including man pages in final archive" >&2; \
          exit 1; \
        fi
 
-# When building a release, stricter checks should be used
-distcheck-release dist-release: export BUILD_RELEASE = 1
+# Backwards compatible distcheck-release target
 distcheck-release: distcheck
 
 distrebuildcheck: dist
diff --git a/NEWS b/NEWS
index 8b075f8..9fe93ca 100644
--- a/NEWS
+++ b/NEWS
@@ -26,57 +26,22 @@ Version 2.8.0 beta1
   '--enabled-disk-templates'.
 
 
-Version 2.7.0 rc1
------------------
-
-*(unreleased)*
-
-- Fix hail to verify disk instance policies on a per-disk basis (Issue 418).
-
-
-Version 2.7.0 beta2
+Version 2.7.0 beta3
 -------------------
 
-*(Released Tue, 2 Apr 2013)*
+*(Released Mon, 22 Apr 2013)*
 
-- Networks no longer have a "type" slot, since this information was
-  unused in Ganeti: instead of it tags should be used.
-- Diskless instances are now externally mirrored (Issue 237). This for
-  now has only been tested in conjunction with explicit target nodes for
-  migration/failover.
-- The rapi client now has a ``target_node`` option to MigrateInstance.
-- Fix early exit return code for hbal (Issue 386).
-- Fix ``gnt-instance migrate/failover -n`` (Issue 396).
-- Fix ``rbd showmapped`` output parsing (Issue 312).
-- Networks are now referenced indexed by UUID, rather than name. This
-  will require running cfgupgrade, from 2.7.0beta1, if networks are in
-  use.
-- The OS environment now includes network information.
-- Deleting of a network is now disallowed if any instance nic is using
-  it, to prevent dangling references.
-- External storage is now documented in man pages.
-- The exclusive_storage flag can now only be set at nodegroup level.
-- Hbal can now submit an explicit priority with its jobs.
-- Many network related locking fixes.
-- Bump up the required pylint version to 0.25.1.
-- Fix the ``no_remember`` option in RAPI client.
-- Many ipolicy related tests, qa, and fixes.
-- Many documentation improvements and fixes.
-- Fix building with ``--disable-file-storage``.
-- Fix ``-q`` option in htools, which was broken if passed more than
-  once.
-- Some haskell/python interaction improvements and fixes.
-- Fix iallocator in case of missing LVM storage.
-- Fix confd config load in case of ``--no-lvm-storage``.
-- The confd/query functionality is now mentioned in the security
-  documentation.
-
-
-Version 2.7.0 beta1
--------------------
-
-*(Released Wed, 6 Feb 2013)*
+Incompatible/important changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+- Instance policies for disk size were documented to be on a per-disk
+  basis, but hail applied them to the sum of all disks. This has been
+  fixed.
+- ``hbal`` will now exit with status 0 if, during job execution over
+  LUXI, early exit has been requested and all jobs are successful;
+  before, exit status 1 was used, which cannot be differentiated from
+  "job error" case
+- Compatibility with newer versions of rbd has been fixed
 - ``gnt-instance batch-create`` has been changed to use the bulk create
   opcode from Ganeti. This lead to incompatible changes in the format of
   the JSON file. It's now not a custom dict anymore but a dict
@@ -101,6 +66,35 @@ Version 2.7.0 beta1
   destination node, named ``prepare-node-join``, to configure the SSH
   daemon. Paramiko is no longer necessary to configure nodes' SSH
   daemons via ``gnt-node add``.
+- Draining (``gnt-cluster queue drain``) and un-draining the job queue
+  (``gnt-cluster queue undrain``) now affects all nodes in a cluster and
+  the flag is not reset after a master failover.
+- Python 2.4 has *not* been tested with this release. Using 2.6 or above
+  is recommended. 2.6 will be mandatory from the 2.8 series.
+
+
+New features
+~~~~~~~~~~~~
+
+- New network management functionality to support automatic allocation
+  of IP addresses and managing of network parameters. See
+  :manpage:`gnt-network(8)` for more details.
+- New external storage backend, to allow managing arbitrary storage
+  systems external to the cluster. See
+  :manpage:`ganeti-extstorage-interface(7)`.
+- New ``exclusive-storage`` node parameter added, restricted to
+  nodegroup level. When it's set to true, physical disks are assigned in
+  an exclusive fashion to instances, as documented in :doc:`Partitioned
+  Ganeti <design-partitioned>`.  Currently, only instances using the
+  ``plain`` disk template are supported.
+- The KVM hypervisor has been updated with many new hypervisor
+  parameters, including a generic one for passing arbitrary command line
+  values. See a complete list in :manpage:`gnt-instance(8)`.
+- A new tool, called ``mon-collector``, is the stand-alone executor of
+  the data collectors for a monitoring system. As of this version, it
+  just includes the DRBD data collector, that can be executed by calling
+  ``mon-collector`` using the ``drbd`` parameter. See
+  :manpage:`mon-collector(7)`.
 - A new user option, :pyeval:`rapi.RAPI_ACCESS_READ`, has been added
   for RAPI users. It allows granting permissions to query for
   information to a specific user without giving
@@ -109,9 +103,6 @@ Version 2.7.0 beta1
   a cluster from a machine by stopping all daemons, removing
   certificates and ssconf files. Unless the ``--no-backup`` option is
   given, copies of the certificates are made.
-- Draining (``gnt-cluster queue drain``) and un-draining the job queue
-  (``gnt-cluster queue undrain``) now affects all nodes in a cluster and
-  the flag is not reset after a master failover.
 - Instance creations now support the use of opportunistic locking,
   potentially speeding up the (parallel) creation of multiple instances.
   This feature is currently only available via the :doc:`RAPI
@@ -122,45 +113,95 @@ Version 2.7.0 beta1
   allocate the instance, the temporary error code
   :pyeval:`errors.ECODE_TEMP_NORES` is returned. The operation can be
   retried thereafter, with or without opportunistic locking.
-- The functionality for allocating multiple instances at once has been
-  overhauled and is now also available through :doc:`RAPI <rapi>`.
-- Man pages can now be included when the documentation is built, in
-  which case the output is in ``doc/man-html``. The configure-time
-  option is ``--enable-manpages-in-doc``. Sphinx 1.0 or higher is
-  required.
-- A new htool called ``hroller`` helps scheduling parallel shutdown of
-  nodes on clusters where all instances are shut down, but without ever
-  rebooting both primary and secondary node for an instance at the same
-  time.
+- New experimental linux-ha resource scripts.
+- Restricted-commands support: ganeti can now be asked (via command line
+  or rapi) to perform commands on a node. These are passed via ganeti
+  RPC rather than ssh. This functionality is restricted to commands
+  specified on the ``$sysconfdir/ganeti/restricted-commands`` for security
+  reasons. The file is not copied automatically.
+
+
+Misc changes
+~~~~~~~~~~~~
+
+- Diskless instances are now externally mirrored (Issue 237). This for
+  now has only been tested in conjunction with explicit target nodes for
+  migration/failover.
 - Queries not needing locks or RPC access to the node can now be
   performed by the confd daemon, making them independent from jobs, and
   thus faster to execute. This is selectable at configure time.
-- The KVM hypervisor has been updated with many new hypervisor
-  parameters, including a generic one for passing arbitrary command line
-  values. See a complete list in :manpage:`gnt-instance(8)`.
-- A new tool, called ``mon-collector``, is the stand-alone executor of
-  the data collectors for a monitoring system. As of this version, it
-  just includes the DRBD data collector, that can be executed by calling
-  ``mon-collector`` using the ``drbd`` parameter. See
-  :manpage:`mon-collector(7)`.
-- New ``exclusive-storage`` node parameter added. When it's set to true,
-  physical disks are assigned in an exclusive fashion to instances, as
-  documented in :doc:`Partitioned Ganeti <design-partitioned>`.
-  Currently, only instances using the ``plain`` disk template are
-  supported.
-- New network management functionality to support automatic allocation
-  of IP addresses and managing of network parameters. See
-  :manpage:`gnt-network(8)` for more details.
-- New external storage backend, to allow managing arbitrary storage
-  systems external to the cluster. See
-  :manpage:`ganeti-extstorage-interface(7)`.
-- Instance renames of LVM-based instances will now update the LV tags
-  (which can be used to recover the instance-to-LV mapping in case of
-  emergencies)
-- ``hbal`` will now exit with status 0 if, during job execution over
-  LUXI, early exit has been requested and all jobs are successful;
-  before, exit status 1 was used, which cannot be differentiated from
-  "job error" case
+- The functionality for allocating multiple instances at once has been
+  overhauled and is now also available through :doc:`RAPI <rapi>`.
+
+Since beta2:
+
+- Fix hail to verify disk instance policies on a per-disk basis (Issue 418).
+- Fix data loss on wrong usage of ``gnt-instance move``
+- Properly export errors in confd-based job queries
+- Add ``users-setup`` tool
+- Fix iallocator protocol to report 0 as a disk size for diskless
+  instances. This avoids hail breaking when a diskless instance is
+  present.
+- Fix job queue directory permission problem that made confd job queries
+  fail. This requires running an ``ensure-dirs --full-run`` on upgrade
+  for access to archived jobs (Issue 406).
+- Limit the sizes of networks supported by ``gnt-network`` to something
+  between a ``/16`` and a ``/30`` to prevent memory bloat and crashes.
+- Fix bugs in instance disk template conversion
+- Fix GHC 7 compatibility
+- Fix ``burnin`` install path (Issue 426).
+- Allow very small disk grows (Issue 347).
+- Fix a ``ganeti-noded`` memory bloat introduced in 2.5, by making sure
+  that noded doesn't import masterd code (Issue 419).
+- Make sure the default metavg at cluster init is the same as the vg, if
+  unspecified (Issue 358).
+- Fix cleanup of partially created disks (part of Issue 416)
+
+
+Version 2.7.0 beta2
+-------------------
+
+*(Released Tue, 2 Apr 2013)*
+
+This was the second beta release of the 2.7 series. Since beta1:
+
+- Networks no longer have a "type" slot, since this information was
+  unused in Ganeti: instead of it tags should be used.
+- The rapi client now has a ``target_node`` option to MigrateInstance.
+- Fix early exit return code for hbal (Issue 386).
+- Fix ``gnt-instance migrate/failover -n`` (Issue 396).
+- Fix ``rbd showmapped`` output parsing (Issue 312).
+- Networks are now referenced indexed by UUID, rather than name. This
+  will require running cfgupgrade, from 2.7.0beta1, if networks are in
+  use.
+- The OS environment now includes network information.
+- Deleting of a network is now disallowed if any instance nic is using
+  it, to prevent dangling references.
+- External storage is now documented in man pages.
+- The exclusive_storage flag can now only be set at nodegroup level.
+- Hbal can now submit an explicit priority with its jobs.
+- Many network related locking fixes.
+- Bump up the required pylint version to 0.25.1.
+- Fix the ``no_remember`` option in RAPI client.
+- Many ipolicy related tests, qa, and fixes.
+- Many documentation improvements and fixes.
+- Fix building with ``--disable-file-storage``.
+- Fix ``-q`` option in htools, which was broken if passed more than
+  once.
+- Some haskell/python interaction improvements and fixes.
+- Fix iallocator in case of missing LVM storage.
+- Fix confd config load in case of ``--no-lvm-storage``.
+- The confd/query functionality is now mentioned in the security
+  documentation.
+
+
+Version 2.7.0 beta1
+-------------------
+
+*(Released Wed, 6 Feb 2013)*
+
+This was the first beta release of the 2.7 series. All important changes
+are listed in the latest 2.7 entry.
 
 
 Version 2.6.2
diff --git a/autotools/check-news b/autotools/check-news
index cb5ac9a..0ad8d63 100755
@@ -32,13 +32,14 @@ import datetime
 import locale
 import fileinput
 import re
+import os
 
 
 DASHES_RE = re.compile(r"^\s*-+\s*$")
 RELEASED_RE = re.compile(r"^\*\(Released (?P<day>[A-Z][a-z]{2}),"
                          r" (?P<date>.+)\)\*$")
 UNRELEASED_RE = re.compile(r"^\*\(unreleased\)\*$")
-VERSION_RE = re.compile(r"^Version \d+(\.\d+)+( (beta|rc)\d+)?$")
+VERSION_RE = re.compile(r"^Version (\d+(\.\d+)+( (beta|rc)\d+)?)$")
 
 #: How many days release timestamps may be in the future
 TIMESTAMP_FUTURE_DAYS_MAX = 3
@@ -73,17 +74,36 @@ def main():
   if curlocale != (None, None):
     Error("Invalid locale %s" % curlocale)
 
+  # Get the release version, but replace "~" with " " as the version
+  # in the NEWS file uses spaces for beta and rc releases.
+  release = os.environ.get('RELEASE', "").replace("~", " ")
+
   prevline = None
   expect_date = False
   count_empty = 0
+  allow_unreleased = True
+  found_versions = set()
 
   for line in fileinput.input():
     line = line.rstrip("\n")
 
-    if VERSION_RE.match(line):
+    version_match = VERSION_RE.match(line)
+    if version_match:
       ReqNLines(2, count_empty, fileinput.filelineno(), line)
-
-    if UNRELEASED_RE.match(line) or RELEASED_RE.match(line):
+      version = version_match.group(1)
+      if version in found_versions:
+        Error("Line %s: Duplicate release %s found" %
+              (fileinput.filelineno(), version))
+      found_versions.add(version)
+      if version == release:
+        allow_unreleased = False
+
+    unreleased_match = UNRELEASED_RE.match(line)
+    if unreleased_match and not allow_unreleased:
+      Error("Line %s: Unreleased version after current release %s" %
+            (fileinput.filelineno(), release))
+
+    if unreleased_match or RELEASED_RE.match(line):
       ReqNLines(1, count_empty, fileinput.filelineno(), line)
 
     if line:
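
The rule the new check enforces: an "*(unreleased)*" marker may only appear
above the section whose header matches the version being released (passed in
via RELEASE, with "~" mapped to a space, so "2.7.0~beta3" becomes
"2.7.0 beta3"). A minimal standalone sketch of that rule, outside the script
itself:

  import re

  VERSION_RE = re.compile(r"^Version (\d+(\.\d+)+( (beta|rc)\d+)?)$")
  UNRELEASED_RE = re.compile(r"^\*\(unreleased\)\*$")

  def UnreleasedAfter(lines, release):
    """Return line numbers of unreleased markers below the released header."""
    allow_unreleased = True
    bad = []
    for (lineno, line) in enumerate(lines, 1):
      m = VERSION_RE.match(line)
      if m and m.group(1) == release:
        allow_unreleased = False
      if UNRELEASED_RE.match(line) and not allow_unreleased:
        bad.append(lineno)
    return bad

  news = ["Version 2.8.0 beta1", "*(unreleased)*",
          "Version 2.7.0 beta3", "*(Released Mon, 22 Apr 2013)*"]
  assert UnreleasedAfter(news, "2.7.0 beta3") == []   # 2.8.0 may stay open
  assert UnreleasedAfter(news, "2.8.0 beta1") == [2]  # would fail the check
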
diff --git a/configure.ac b/configure.ac
index 80f6156..e7c05d0 100644
@@ -2,7 +2,7 @@
 m4_define([gnt_version_major], [2])
 m4_define([gnt_version_minor], [7])
 m4_define([gnt_version_revision], [0])
-m4_define([gnt_version_suffix], [~beta2])
+m4_define([gnt_version_suffix], [~beta3])
 m4_define([gnt_version_full],
           m4_format([%d.%d.%d%s],
                     gnt_version_major, gnt_version_minor,
diff --git a/doc/rapi.rst b/doc/rapi.rst
index 10cd80e..eb115dc 100644
@@ -96,6 +96,8 @@ by using the standard HTTP basic access authentication. This means that
 for accessing the protected URL ``https://cluster.example.com/resource``,
 the address ``https://username:password@cluster.example.com/resource`` should
 be used instead.
+be used instead. Alternatively, the appropriate parameter of your HTTP client
+(such as ``-u`` for ``curl``) can be used.
 
 .. [#pwhash] Using the MD5 hash of username, realm and password is
    described in :rfc:`2617` ("HTTP Authentication"), sections 3.2.2.2
@@ -249,7 +251,7 @@ The instance policy specification is a dict with the following fields:
 
 
 :pyeval:`constants.ISPECS_MINMAX`
-  A dict with the following two fields:
+  A list of dictionaries, each with the following two fields:
 
   |ispec-min|, |ispec-max|
     A sub- `dict` with the following fields, which sets the limit of the
@@ -304,7 +306,10 @@ or ``curl``::
   $ curl https://%CLUSTERNAME%:5080/2/info
 
 Note: with ``curl``, the request method (GET, POST, PUT) can be specified
-using the ``-X`` command line option.
+using the ``-X`` command line option, and the username/password can be
+specified with the ``-u`` option. In case of POST requests with a body, the
+Content-Type can be set to JSON (as per the Protocol_ section) using the
+parameter ``-H "Content-Type: application/json"``.
 
 Python
 ++++++
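
For scripted access the same requests can go through the RAPI client shipped
in lib/rapi/client.py instead of curl; a short sketch, with placeholder host
name and credentials and assuming an account that has at least read access:

  from ganeti.rapi import client

  # Placeholder endpoint and credentials; 5080 is the default port.
  cl = client.GanetiRapiClient("cluster.example.com",
                               username="reader", password="secret")
  info = cl.GetInfo()  # GET /2/info, the same data as the curl example
  print info["name"]
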
diff --git a/lib/backend.py b/lib/backend.py
index d7ed407..509e5e4 100644
@@ -61,12 +61,12 @@ from ganeti import ssconf
 from ganeti import serializer
 from ganeti import netutils
 from ganeti import runtime
-from ganeti import mcpu
 from ganeti import compat
 from ganeti import pathutils
 from ganeti import vcluster
 from ganeti import ht
 from ganeti.block.base import BlockDev
+from ganeti import hooksmaster
 
 
 _BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
@@ -327,10 +327,10 @@ def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
 
       cfg = _GetConfig()
       hr = HooksRunner()
-      hm = mcpu.HooksMaster(hook_opcode, hooks_path, nodes, hr.RunLocalHooks,
-                            None, env_fn, logging.warning, cfg.GetClusterName(),
-                            cfg.GetMasterNode())
-
+      hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
+                                   hr.RunLocalHooks, None, env_fn,
+                                   logging.warning, cfg.GetClusterName(),
+                                   cfg.GetMasterNode())
       hm.RunPhase(constants.HOOKS_PHASE_PRE)
       result = fn(*args, **kwargs)
       hm.RunPhase(constants.HOOKS_PHASE_POST)
@@ -1330,13 +1330,17 @@ def _GatherAndLinkBlockDevs(instance):
   return block_devices
 
 
-def StartInstance(instance, startup_paused):
+def StartInstance(instance, startup_paused, reason, store_reason=True):
   """Start an instance.
 
   @type instance: L{objects.Instance}
   @param instance: the instance object
   @type startup_paused: bool
   @param instance: pause instance at startup?
+  @type reason: list of reasons
+  @param reason: the reason trail for this startup
+  @type store_reason: boolean
+  @param store_reason: whether to store the shutdown reason trail on file
   @rtype: None
 
   """
@@ -1350,6 +1354,8 @@ def StartInstance(instance, startup_paused):
     block_devices = _GatherAndLinkBlockDevs(instance)
     hyper = hypervisor.GetHypervisor(instance.hypervisor)
     hyper.StartInstance(instance, block_devices, startup_paused)
+    if store_reason:
+      _StoreInstReasonTrail(instance.name, reason)
   except errors.BlockDeviceError, err:
     _Fail("Block device error: %s", err, exc=True)
   except errors.HypervisorError, err:
@@ -1357,7 +1363,7 @@ def StartInstance(instance, startup_paused):
     _Fail("Hypervisor error: %s", err, exc=True)
 
 
-def InstanceShutdown(instance, timeout):
+def InstanceShutdown(instance, timeout, reason, store_reason=True):
   """Shut an instance down.
 
   @note: this functions uses polling with a hardcoded timeout.
@@ -1366,6 +1372,10 @@ def InstanceShutdown(instance, timeout):
   @param instance: the instance object
   @type timeout: integer
   @param timeout: maximum timeout for soft shutdown
+  @type reason: list of reasons
+  @param reason: the reason trail for this shutdown
+  @type store_reason: boolean
+  @param store_reason: whether to store the shutdown reason trail on file
   @rtype: None
 
   """
@@ -1387,6 +1397,8 @@ def InstanceShutdown(instance, timeout):
 
       try:
         hyper.StopInstance(instance, retry=self.tried_once)
+        if store_reason:
+          _StoreInstReasonTrail(instance.name, reason)
       except errors.HypervisorError, err:
         if iname not in hyper.ListInstances():
           # if the instance is no longer existing, consider this a
@@ -1462,8 +1474,8 @@ def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
       _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
   elif reboot_type == constants.INSTANCE_REBOOT_HARD:
     try:
-      InstanceShutdown(instance, shutdown_timeout)
-      result = StartInstance(instance, False)
+      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
+      result = StartInstance(instance, False, reason, store_reason=False)
       _StoreInstReasonTrail(instance.name, reason)
       return result
     except errors.HypervisorError, err:
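
The "reason" argument threaded through the two functions above is the reason
trail described in doc/design-reason-trail: a list of (source, reason,
timestamp) entries that grows as the request travels through Ganeti. A sketch
of the shape with made-up values, and of why hard reboots pass
store_reason=False:

  # Illustrative only -- entry contents are invented, timestamps are in ns.
  reason = [
    ("gnt:client:gnt-instance", "weekly maintenance", 1367244795000000000),
    ("gnt:opcode:op_instance_reboot", "", 1367244795100000000),
  ]

  # A hard reboot shuts down and restarts the instance without storing the
  # trail twice, then records it once for the reboot as a whole:
  #   InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
  #   StartInstance(instance, False, reason, store_reason=False)
  #   _StoreInstReasonTrail(instance.name, reason)
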
diff --git a/lib/block/bdev.py b/lib/block/bdev.py
index 773a6d4..cf3b9a8 100644
@@ -497,7 +497,7 @@ class LogicalVolume(base.BlockDev):
     """
     self.attached = False
     result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
-                           "--units=m", "--nosuffix",
+                           "--units=k", "--nosuffix",
                            "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                            "vg_extent_size,stripes", self.dev_path])
     if result.failed:
@@ -690,10 +690,12 @@ class LogicalVolume(base.BlockDev):
       if not self.Attach():
         base.ThrowError("Can't attach to LV during Grow()")
     full_stripe_size = self.pe_size * self.stripe_count
+    # pe_size is in KB
+    amount *= 1024
     rest = amount % full_stripe_size
     if rest != 0:
       amount += full_stripe_size - rest
-    cmd = ["lvextend", "-L", "+%dm" % amount]
+    cmd = ["lvextend", "-L", "+%dk" % amount]
     if dryrun:
       cmd.append("--test")
     # we try multiple algorithms since the 'best' ones might not have
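
The switch from mebibytes to kibibytes above is what allows very small grows
(Issue 347 in the NEWS entry): the requested amount is still rounded up to a
whole number of full stripes, but now at KiB granularity. A worked example,
assuming a 4 MiB extent size and two stripes:

  pe_size = 4096                             # from "lvs --units=k", in KiB
  stripe_count = 2
  full_stripe_size = pe_size * stripe_count  # 8192 KiB

  amount = 100                               # requested grow, in MiB
  amount *= 1024                             # 102400 KiB
  rest = amount % full_stripe_size           # 4096 KiB
  if rest != 0:
    amount += full_stripe_size - rest        # rounded up to 106496 KiB

  print "lvextend -L +%dk" % amount          # lvextend -L +106496k
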
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index 4b42f96..d648c77 100644
@@ -563,6 +563,11 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                                           utils.CommaJoin(unknown_params)),
                                  errors.ECODE_INVAL)
     utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+    if template == constants.DT_DRBD8 and vg_name is not None:
+      # The default METAVG value is equal to the VG name set at init time,
+      # if provided
+      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name
+
   try:
     utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
   except errors.OpPrereqError, err:
diff --git a/lib/cli.py b/lib/cli.py
index cd19384..212cd67 100644
@@ -108,6 +108,7 @@ __all__ = [
   "IGNORE_REMOVE_FAILURES_OPT",
   "IGNORE_SECONDARIES_OPT",
   "IGNORE_SIZE_OPT",
+  "INCLUDEDEFAULTS_OPT",
   "INTERVAL_OPT",
   "MAC_PREFIX_OPT",
   "MAINTAIN_NODE_HEALTH_OPT",
@@ -188,6 +189,8 @@ __all__ = [
   "SPECS_DISK_SIZE_OPT",
   "SPECS_MEM_SIZE_OPT",
   "SPECS_NIC_COUNT_OPT",
+  "SPLIT_ISPECS_OPTS",
+  "IPOLICY_STD_SPECS_OPT",
   "IPOLICY_DISK_TEMPLATES",
   "IPOLICY_VCPU_RATIO",
   "SPICE_CACERT_OPT",
@@ -236,6 +239,7 @@ __all__ = [
   "FormatQueryResult",
   "FormatParamsDictInfo",
   "FormatPolicyInfo",
+  "PrintIPolicyCommand",
   "PrintGenericInfo",
   "GenerateTable",
   "AskUser",
@@ -563,12 +567,13 @@ def check_unit(option, opt, value): # pylint: disable=W0613
     raise OptionValueError("option %s: %s" % (opt, err))
 
 
-def _SplitKeyVal(opt, data):
+def _SplitKeyVal(opt, data, parse_prefixes):
   """Convert a KeyVal string into a dict.
 
   This function will convert a key=val[,...] string into a dict. Empty
   values will be converted specially: keys which have the prefix 'no_'
-  will have the value=False and the prefix stripped, the others will
+  will have the value=False and the prefix stripped, keys with the prefix
+  "-" will have value=None and the prefix stripped, and the others will
   have value=True.
 
   @type opt: string
@@ -576,6 +581,8 @@ def _SplitKeyVal(opt, data):
       data, used in building error messages
   @type data: string
   @param data: a string of the format key=val,key=val,...
+  @type parse_prefixes: bool
+  @param parse_prefixes: whether to handle prefixes specially
   @rtype: dict
   @return: {key=val, key=val}
   @raises errors.ParameterError: if there are duplicate keys
@@ -586,13 +593,16 @@ def _SplitKeyVal(opt, data):
     for elem in utils.UnescapeAndSplit(data, sep=","):
       if "=" in elem:
         key, val = elem.split("=", 1)
-      else:
+      elif parse_prefixes:
         if elem.startswith(NO_PREFIX):
           key, val = elem[len(NO_PREFIX):], False
         elif elem.startswith(UN_PREFIX):
           key, val = elem[len(UN_PREFIX):], None
         else:
           key, val = elem, True
+      else:
+        raise errors.ParameterError("Missing value for key '%s' in option %s" %
+                                    (elem, opt))
       if key in kv_dict:
         raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                     (key, opt))
@@ -600,11 +610,19 @@ def _SplitKeyVal(opt, data):
   return kv_dict
 
 
-def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
-  """Custom parser for ident:key=val,key=val options.
+def _SplitIdentKeyVal(opt, value, parse_prefixes):
+  """Helper function to parse "ident:key=val,key=val" options.
 
-  This will store the parsed values as a tuple (ident, {key: val}). As such,
-  multiple uses of this option via action=append is possible.
+  @type opt: string
+  @param opt: option name, used in error messages
+  @type value: string
+  @param value: expected to be in the format "ident:key=val,key=val,..."
+  @type parse_prefixes: bool
+  @param parse_prefixes: whether to handle prefixes specially (see
+      L{_SplitKeyVal})
+  @rtype: tuple
+  @return: (ident, {key=val, key=val})
+  @raises errors.ParameterError: in case of duplicates or other parsing errors
 
   """
   if ":" not in value:
@@ -612,31 +630,67 @@ def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
   else:
     ident, rest = value.split(":", 1)
 
-  if ident.startswith(NO_PREFIX):
+  if parse_prefixes and ident.startswith(NO_PREFIX):
     if rest:
       msg = "Cannot pass options when removing parameter groups: %s" % value
       raise errors.ParameterError(msg)
     retval = (ident[len(NO_PREFIX):], False)
-  elif (ident.startswith(UN_PREFIX) and
-        (len(ident) <= len(UN_PREFIX) or
-         not ident[len(UN_PREFIX)][0].isdigit())):
+  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
+        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
     if rest:
       msg = "Cannot pass options when removing parameter groups: %s" % value
       raise errors.ParameterError(msg)
     retval = (ident[len(UN_PREFIX):], None)
   else:
-    kv_dict = _SplitKeyVal(opt, rest)
+    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
     retval = (ident, kv_dict)
   return retval
 
 
+def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
+  """Custom parser for ident:key=val,key=val options.
+
+  This will store the parsed values as a tuple (ident, {key: val}). As such,
+  multiple uses of this option via action=append is possible.
+
+  """
+  return _SplitIdentKeyVal(opt, value, True)
+
+
 def check_key_val(option, opt, value):  # pylint: disable=W0613
   """Custom parser class for key=val,key=val options.
 
   This will store the parsed values as a dict {key: val}.
 
   """
-  return _SplitKeyVal(opt, value)
+  return _SplitKeyVal(opt, value, True)
+
+
+def _SplitListKeyVal(opt, value):
+  retval = {}
+  for elem in value.split("/"):
+    if not elem:
+      raise errors.ParameterError("Empty section in option '%s'" % opt)
+    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
+    if ident in retval:
+      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
+             (ident, opt, elem))
+      raise errors.ParameterError(msg)
+    retval[ident] = valdict
+  return retval
+
+
+def check_multilist_ident_key_val(_, opt, value):
+  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
+
+  @rtype: list of dictionary
+  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
+
+  """
+  retval = []
+  for line in value.split("//"):
+    retval.append(_SplitListKeyVal(opt, line))
+  return retval
 
 
 def check_bool(option, opt, value): # pylint: disable=W0613
@@ -711,6 +765,7 @@ class CliOption(Option):
     "completion_suggest",
     ]
   TYPES = Option.TYPES + (
+    "multilistidentkeyval",
     "identkeyval",
     "keyval",
     "unit",
@@ -719,6 +774,7 @@ class CliOption(Option):
     "maybefloat",
     )
   TYPE_CHECKER = Option.TYPE_CHECKER.copy()
+  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
   TYPE_CHECKER["identkeyval"] = check_ident_key_val
   TYPE_CHECKER["keyval"] = check_key_val
   TYPE_CHECKER["unit"] = check_unit
@@ -908,6 +964,18 @@ SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                  help="NIC count specs: list of key=value,"
                                  " where key is one of min, max, std")
 
+IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
+IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
+                                      dest="ipolicy_bounds_specs",
+                                      type="multilistidentkeyval", default=None,
+                                      help="Complete instance specs limits")
+
+IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
+IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
+                                   dest="ipolicy_std_specs",
+                                   type="keyval", default=None,
+                                   help="Complete standard instance specs")
+
 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                     dest="ipolicy_disk_templates",
                                     type="list", default=None,
@@ -1559,6 +1627,10 @@ NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                   action="store_false",
                                   help="Don't check for conflicting IPs")
 
+INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
+                                 default=False, action="store_true",
+                                 help="Include default values")
+
 #: Options provided by all commands
 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
 
@@ -1589,14 +1661,19 @@ COMMON_CREATE_OPTS = [
 
 # common instance policy options
 INSTANCE_POLICY_OPTS = [
+  IPOLICY_BOUNDS_SPECS_OPT,
+  IPOLICY_DISK_TEMPLATES,
+  IPOLICY_VCPU_RATIO,
+  IPOLICY_SPINDLE_RATIO,
+  ]
+
+# instance policy split specs options
+SPLIT_ISPECS_OPTS = [
   SPECS_CPU_COUNT_OPT,
   SPECS_DISK_COUNT_OPT,
   SPECS_DISK_SIZE_OPT,
   SPECS_MEM_SIZE_OPT,
   SPECS_NIC_COUNT_OPT,
-  IPOLICY_DISK_TEMPLATES,
-  IPOLICY_VCPU_RATIO,
-  IPOLICY_SPINDLE_RATIO,
   ]
 
 
@@ -3662,13 +3739,24 @@ def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
   if iscluster:
     eff_ipolicy = custom_ipolicy
 
+  minmax_out = []
   custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
-  ret = [
-    (key,
-     FormatParamsDictInfo(custom_minmax.get(key, {}),
-                          eff_ipolicy[constants.ISPECS_MINMAX][key]))
-    for key in constants.ISPECS_MINMAX_KEYS
-    ]
+  if custom_minmax:
+    for (k, minmax) in enumerate(custom_minmax):
+      minmax_out.append([
+        ("%s/%s" % (key, k),
+         FormatParamsDictInfo(minmax[key], minmax[key]))
+        for key in constants.ISPECS_MINMAX_KEYS
+        ])
+  else:
+    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
+      minmax_out.append([
+        ("%s/%s" % (key, k),
+         FormatParamsDictInfo({}, minmax[key]))
+        for key in constants.ISPECS_MINMAX_KEYS
+        ])
+  ret = [("bounds specs", minmax_out)]
+
   if iscluster:
     stdspecs = custom_ipolicy[constants.ISPECS_STD]
     ret.append(
@@ -3688,6 +3776,46 @@ def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
   return ret
 
 
+def _PrintSpecsParameters(buf, specs):
+  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
+  buf.write(",".join(values))
+
+
+def PrintIPolicyCommand(buf, ipolicy, isgroup):
+  """Print the command option used to generate the given instance policy.
+
+  Currently only the parts dealing with specs are supported.
+
+  @type buf: StringIO
+  @param buf: stream to write into
+  @type ipolicy: dict
+  @param ipolicy: instance policy
+  @type isgroup: bool
+  @param isgroup: whether the policy is at group level
+
+  """
+  if not isgroup:
+    stdspecs = ipolicy.get("std")
+    if stdspecs:
+      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
+      _PrintSpecsParameters(buf, stdspecs)
+  minmaxes = ipolicy.get("minmax", [])
+  first = True
+  for minmax in minmaxes:
+    minspecs = minmax.get("min")
+    maxspecs = minmax.get("max")
+    if minspecs and maxspecs:
+      if first:
+        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
+        first = False
+      else:
+        buf.write("//")
+      buf.write("min:")
+      _PrintSpecsParameters(buf, minspecs)
+      buf.write("/max:")
+      _PrintSpecsParameters(buf, maxspecs)
+
+
 def ConfirmOperation(names, list_type, text, extra=""):
   """Ask the user to confirm an operation on a list of list_type.
 
@@ -3740,9 +3868,9 @@ def _MaybeParseUnit(elements):
   return parsed
 
 
-def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
-                        ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
-                        group_ipolicy, allowed_values):
+def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
+                             ispecs_disk_count, ispecs_disk_size,
+                             ispecs_nic_count, group_ipolicy, fill_all):
   try:
     if ispecs_mem_size:
       ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
@@ -3769,7 +3897,8 @@ def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
   else:
     forced_type = TISPECS_CLUSTER_TYPES
   for specs in ispecs_transposed.values():
-    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
+    assert type(specs) is dict
+    utils.ForceDictType(specs, forced_type)
 
   # then transpose
   ispecs = {
@@ -3782,9 +3911,76 @@ def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
     for key, val in specs.items(): # {min: .. ,max: .., std: ..}
       assert key in ispecs
       ispecs[key][name] = val
+  minmax_out = {}
   for key in constants.ISPECS_MINMAX_KEYS:
-    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
-  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
+    if fill_all:
+      minmax_out[key] = \
+        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
+    else:
+      minmax_out[key] = ispecs[key]
+  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
+  if fill_all:
+    ipolicy[constants.ISPECS_STD] = \
+        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
+                         ispecs[constants.ISPECS_STD])
+  else:
+    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
+
+
+def _ParseSpecUnit(spec, keyname):
+  ret = spec.copy()
+  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
+    if k in ret:
+      try:
+        ret[k] = utils.ParseUnit(ret[k])
+      except (TypeError, ValueError, errors.UnitParseError), err:
+        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
+                                    " specs: %s" % (k, ret[k], keyname, err)),
+                                   errors.ECODE_INVAL)
+  return ret
+
+
+def _ParseISpec(spec, keyname, required):
+  ret = _ParseSpecUnit(spec, keyname)
+  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
+  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
+  if required and missing:
+    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
+                               (keyname, utils.CommaJoin(missing)),
+                               errors.ECODE_INVAL)
+  return ret
+
+
+def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
+  ret = None
+  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
+      len(minmax_ispecs[0]) == 1):
+    for (key, spec) in minmax_ispecs[0].items():
+      # This loop is executed exactly once
+      if key in allowed_values and not spec:
+        ret = key
+  return ret
+
+
+def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
+                            group_ipolicy, allowed_values):
+  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
+  if found_allowed is not None:
+    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
+  elif minmax_ispecs is not None:
+    minmax_out = []
+    for mmpair in minmax_ispecs:
+      mmpair_out = {}
+      for (key, spec) in mmpair.items():
+        if key not in constants.ISPECS_MINMAX_KEYS:
+          msg = "Invalid key in bounds instance specifications: %s" % key
+          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+        mmpair_out[key] = _ParseISpec(spec, key, True)
+      minmax_out.append(mmpair_out)
+    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
+  if std_ispecs is not None:
+    assert not group_ipolicy # This is not an option for gnt-group
+    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
 
 
 def CreateIPolicyFromOpts(ispecs_mem_size=None,
@@ -3792,6 +3988,8 @@ def CreateIPolicyFromOpts(ispecs_mem_size=None,
                           ispecs_disk_count=None,
                           ispecs_disk_size=None,
                           ispecs_nic_count=None,
+                          minmax_ispecs=None,
+                          std_ispecs=None,
                           ipolicy_disk_templates=None,
                           ipolicy_vcpu_ratio=None,
                           ipolicy_spindle_ratio=None,
@@ -3803,16 +4001,31 @@ def CreateIPolicyFromOpts(ispecs_mem_size=None,
   @param fill_all: whether for cluster policies we should ensure that
     all values are filled
 
-
   """
+  assert not (fill_all and allowed_values)
+
+  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
+                 ispecs_disk_size or ispecs_nic_count)
+  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
+    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
+                               " together with any --ipolicy-xxx-specs option",
+                               errors.ECODE_INVAL)
 
   ipolicy_out = objects.MakeEmptyIPolicy()
-  _InitIspecsFromOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
-                      ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
-                      group_ipolicy, allowed_values)
+  if split_specs:
+    assert fill_all
+    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
+                             ispecs_disk_count, ispecs_disk_size,
+                             ispecs_nic_count, group_ipolicy, fill_all)
+  elif (minmax_ispecs is not None or std_ispecs is not None):
+    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
+                            group_ipolicy, allowed_values)
 
   if ipolicy_disk_templates is not None:
-    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
+    if allowed_values and ipolicy_disk_templates in allowed_values:
+      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
+    else:
+      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
   if ipolicy_vcpu_ratio is not None:
     ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
   if ipolicy_spindle_ratio is not None:
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index 88a2e19..d469f82 100644
@@ -26,6 +26,7 @@
 # W0614: Unused import %s from wildcard import (since we need cli)
 # C0103: Invalid name gnt-cluster
 
+from cStringIO import StringIO
 import os.path
 import time
 import OpenSSL
@@ -178,6 +179,8 @@ def InitCluster(opts, args):
     ispecs_disk_count=opts.ispecs_disk_count,
     ispecs_disk_size=opts.ispecs_disk_size,
     ispecs_nic_count=opts.ispecs_nic_count,
+    minmax_ispecs=opts.ipolicy_bounds_specs,
+    std_ispecs=opts.ipolicy_std_specs,
     ipolicy_disk_templates=opts.ipolicy_disk_templates,
     ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
     ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
@@ -985,11 +988,8 @@ def SetClusterParams(opts, args):
           opts.hv_state or
           opts.enabled_disk_templates or
           opts.disk_state or
-          opts.ispecs_mem_size or
-          opts.ispecs_cpu_count or
-          opts.ispecs_disk_count or
-          opts.ispecs_disk_size or
-          opts.ispecs_nic_count or
+          opts.ipolicy_bounds_specs is not None or
+          opts.ipolicy_std_specs is not None or
           opts.ipolicy_disk_templates is not None or
           opts.ipolicy_vcpu_ratio is not None or
           opts.ipolicy_spindle_ratio is not None):
@@ -1046,11 +1046,8 @@ def SetClusterParams(opts, args):
     utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
 
   ipolicy = CreateIPolicyFromOpts(
-    ispecs_mem_size=opts.ispecs_mem_size,
-    ispecs_cpu_count=opts.ispecs_cpu_count,
-    ispecs_disk_count=opts.ispecs_disk_count,
-    ispecs_disk_size=opts.ispecs_disk_size,
-    ispecs_nic_count=opts.ispecs_nic_count,
+    minmax_ispecs=opts.ipolicy_bounds_specs,
+    std_ispecs=opts.ipolicy_std_specs,
     ipolicy_disk_templates=opts.ipolicy_disk_templates,
     ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
     ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
@@ -1511,6 +1508,26 @@ def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
     return _off_fn(opts, node_list, inst_map)
 
 
+def _GetCreateCommand(info):
+  buf = StringIO()
+  buf.write("gnt-cluster init")
+  PrintIPolicyCommand(buf, info["ipolicy"], False)
+  buf.write(" ")
+  buf.write(info["name"])
+  return buf.getvalue()
+
+
+def ShowCreateCommand(opts, args):
+  """Shows the command that can be used to re-create the cluster.
+
+  Currently it works only for ipolicy specs.
+
+  """
+  cl = GetClient(query=True)
+  result = cl.QueryClusterInfo()
+  ToStdout(_GetCreateCommand(result))
+
+
 commands = {
   "init": (
     InitCluster, [ArgHost(min=1, max=1)],
@@ -1521,8 +1538,8 @@ commands = {
      MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
      DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
      NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
-     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT]
-     + INSTANCE_POLICY_OPTS,
+     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
+     IPOLICY_STD_SPECS_OPT] + INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
     "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
   "destroy": (
     DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
@@ -1600,8 +1617,8 @@ commands = {
      DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
      RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
      NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
-     DISK_STATE_OPT, SUBMIT_OPT, ENABLED_DISK_TEMPLATES_OPT] +
-    INSTANCE_POLICY_OPTS,
+     DISK_STATE_OPT, SUBMIT_OPT, ENABLED_DISK_TEMPLATES_OPT,
+     IPOLICY_STD_SPECS_OPT] + INSTANCE_POLICY_OPTS,
     "[opts...]",
     "Alters the parameters of the cluster"),
   "renew-crypto": (
@@ -1623,6 +1640,9 @@ commands = {
   "deactivate-master-ip": (
     DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
     "Deactivates the master IP"),
+  "show-ispecs-cmd": (
+    ShowCreateCommand, ARGS_NONE, [], "",
+    "Show the command line to re-create the cluster"),
   }
 
 
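The new sub-command prints a command line that reproduces the queried policy;
a sketch of what _GetCreateCommand() would emit for a hypothetical cluster
(field values are examples only):

  info = {
    "name": "cluster.example.com",
    "ipolicy": {
      "std": {"cpu-count": 1, "memory-size": 128},
      "minmax": [{"min": {"cpu-count": 1, "memory-size": 128},
                  "max": {"cpu-count": 8, "memory-size": 32768}}],
    },
  }
  # _GetCreateCommand(info) ==
  #   "gnt-cluster init"
  #   " --ipolicy-std-specs cpu-count=1,memory-size=128"
  #   " --ipolicy-bounds-specs min:cpu-count=1,memory-size=128"
  #   "/max:cpu-count=8,memory-size=32768 cluster.example.com"
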
diff --git a/lib/client/gnt_group.py b/lib/client/gnt_group.py
index 2d11618..5bef440 100644
@@ -24,6 +24,8 @@
 # W0401: Wildcard import ganeti.cli
 # W0614: Unused import %s from wildcard import (since we need cli)
 
+from cStringIO import StringIO
+
 from ganeti.cli import *
 from ganeti import constants
 from ganeti import opcodes
@@ -48,11 +50,7 @@ def AddGroup(opts, args):
 
   """
   ipolicy = CreateIPolicyFromOpts(
-    ispecs_mem_size=opts.ispecs_mem_size,
-    ispecs_cpu_count=opts.ispecs_cpu_count,
-    ispecs_disk_count=opts.ispecs_disk_count,
-    ispecs_disk_size=opts.ispecs_disk_size,
-    ispecs_nic_count=opts.ispecs_nic_count,
+    minmax_ispecs=opts.ipolicy_bounds_specs,
     ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
     ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
     group_ipolicy=True)
@@ -159,10 +157,9 @@ def SetGroupParams(opts, args):
 
   """
   allmods = [opts.ndparams, opts.alloc_policy, opts.diskparams, opts.hv_state,
-             opts.disk_state, opts.ispecs_mem_size, opts.ispecs_cpu_count,
-             opts.ispecs_disk_count, opts.ispecs_disk_size,
-             opts.ispecs_nic_count, opts.ipolicy_vcpu_ratio,
-             opts.ipolicy_spindle_ratio, opts.diskparams]
+             opts.disk_state, opts.ipolicy_bounds_specs,
+             opts.ipolicy_vcpu_ratio, opts.ipolicy_spindle_ratio,
+             opts.diskparams]
   if allmods.count(None) == len(allmods):
     ToStderr("Please give at least one of the parameters.")
     return 1
@@ -176,26 +173,9 @@ def SetGroupParams(opts, args):
 
   diskparams = dict(opts.diskparams)
 
-  # set the default values
-  to_ipolicy = [
-    opts.ispecs_mem_size,
-    opts.ispecs_cpu_count,
-    opts.ispecs_disk_count,
-    opts.ispecs_disk_size,
-    opts.ispecs_nic_count,
-    ]
-  for ispec in to_ipolicy:
-    for param in ispec:
-      if isinstance(ispec[param], basestring):
-        if ispec[param].lower() == "default":
-          ispec[param] = constants.VALUE_DEFAULT
   # create ipolicy object
   ipolicy = CreateIPolicyFromOpts(
-    ispecs_mem_size=opts.ispecs_mem_size,
-    ispecs_cpu_count=opts.ispecs_cpu_count,
-    ispecs_disk_count=opts.ispecs_disk_count,
-    ispecs_disk_size=opts.ispecs_disk_size,
-    ispecs_nic_count=opts.ispecs_nic_count,
+    minmax_ispecs=opts.ipolicy_bounds_specs,
     ipolicy_disk_templates=opts.ipolicy_disk_templates,
     ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
     ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
@@ -310,6 +290,35 @@ def GroupInfo(_, args):
     ])
 
 
+def _GetCreateCommand(group):
+  (name, ipolicy) = group
+  buf = StringIO()
+  buf.write("gnt-group add")
+  PrintIPolicyCommand(buf, ipolicy, True)
+  buf.write(" ")
+  buf.write(name)
+  return buf.getvalue()
+
+
+def ShowCreateCommand(opts, args):
+  """Shows the command that can be used to re-create a node group.
+
+  Currently it works only for ipolicy specs.
+
+  """
+  cl = GetClient(query=True)
+  selected_fields = ["name"]
+  if opts.include_defaults:
+    selected_fields += ["ipolicy"]
+  else:
+    selected_fields += ["custom_ipolicy"]
+  result = cl.QueryGroups(names=args, fields=selected_fields,
+                          use_locking=False)
+
+  for group in result:
+    ToStdout(_GetCreateCommand(group))
+
+
 commands = {
   "add": (
     AddGroup, ARGS_ONE_GROUP,
@@ -363,6 +372,10 @@ commands = {
   "info": (
     GroupInfo, ARGS_MANY_GROUPS, [], "[<group_name>...]",
     "Show group information"),
+  "show-ispecs-cmd": (
+    ShowCreateCommand, ARGS_MANY_GROUPS, [INCLUDEDEFAULTS_OPT],
+    "[--include-defaults] [<group_name>...]",
+    "Show the command line to re-create a group"),
   }
 
 
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 092fda3..553e645 100644
@@ -813,20 +813,6 @@ def _GetUpdatedParams(old_params, update_dict,
   return params_copy
 
 
-def _UpdateMinMaxISpecs(ipolicy, new_minmax, group_policy):
-  use_none = use_default = group_policy
-  minmax = ipolicy.setdefault(constants.ISPECS_MINMAX, {})
-  for (key, value) in new_minmax.items():
-    if key not in constants.ISPECS_MINMAX_KEYS:
-      raise errors.OpPrereqError("Invalid key in new ipolicy/%s: %s" %
-                                 (constants.ISPECS_MINMAX, key),
-                                 errors.ECODE_INVAL)
-    old_spec = minmax.get(key, {})
-    minmax[key] = _GetUpdatedParams(old_spec, value, use_none=use_none,
-                                    use_default=use_default)
-    utils.ForceDictType(minmax[key], constants.ISPECS_PARAMETER_TYPES)
-
-
 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
   """Return the new version of an instance policy.
 
@@ -834,41 +820,45 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
     we should support removal of policy entries
 
   """
-  use_none = use_default = group_policy
   ipolicy = copy.deepcopy(old_ipolicy)
   for key, value in new_ipolicy.items():
     if key not in constants.IPOLICY_ALL_KEYS:
       raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                  errors.ECODE_INVAL)
-    if key == constants.ISPECS_MINMAX:
-      _UpdateMinMaxISpecs(ipolicy, value, group_policy)
-    elif key == constants.ISPECS_STD:
-      ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
-                                       use_none=use_none,
-                                       use_default=use_default)
-      utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
+    if (not value or value == [constants.VALUE_DEFAULT] or
+        value == constants.VALUE_DEFAULT):
+      if group_policy:
+        if key in ipolicy:
+          del ipolicy[key]
+      else:
+        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
+                                   " on the cluster'" % key,
+                                   errors.ECODE_INVAL)
     else:
-      if (not value or value == [constants.VALUE_DEFAULT] or
-          value == constants.VALUE_DEFAULT):
+      if key in constants.IPOLICY_PARAMETERS:
+        # FIXME: we assume all such values are float
+        try:
+          ipolicy[key] = float(value)
+        except (TypeError, ValueError), err:
+          raise errors.OpPrereqError("Invalid value for attribute"
+                                     " '%s': '%s', error: %s" %
+                                     (key, value, err), errors.ECODE_INVAL)
+      elif key == constants.ISPECS_MINMAX:
+        for minmax in value:
+          for k in minmax.keys():
+            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
+        ipolicy[key] = value
+      elif key == constants.ISPECS_STD:
         if group_policy:
-          del ipolicy[key]
-        else:
-          raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
-                                     " on the cluster'" % key,
-                                     errors.ECODE_INVAL)
+          msg = "%s cannot appear in group instance specs" % key
+          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+        ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
+                                         use_none=False, use_default=False)
+        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
       else:
-        if key in constants.IPOLICY_PARAMETERS:
-          # FIXME: we assume all such values are float
-          try:
-            ipolicy[key] = float(value)
-          except (TypeError, ValueError), err:
-            raise errors.OpPrereqError("Invalid value for attribute"
-                                       " '%s': '%s', error: %s" %
-                                       (key, value, err), errors.ECODE_INVAL)
-        else:
-          # FIXME: we assume all others are lists; this should be redone
-          # in a nicer way
-          ipolicy[key] = list(value)
+        # FIXME: we assume all others are lists; this should be redone
+        # in a nicer way
+        ipolicy[key] = list(value)
   try:
     objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
   except errors.ConfigurationError, err:
@@ -1288,10 +1278,15 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
     ret.append("Disk template %s is not allowed (allowed templates: %s)" %
                (disk_template, utils.CommaJoin(allowed_dts)))
 
-  minmax = ipolicy[constants.ISPECS_MINMAX]
-  return ret + filter(None,
-                      (_compute_fn(name, qualifier, minmax, value)
-                       for (name, qualifier, value) in test_settings))
+  min_errs = None
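+  # An instance only has to fit one of the min/max pairs, so keep the
+  # smallest set of violations found among all of them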
+  for minmax in ipolicy[constants.ISPECS_MINMAX]:
+    errs = filter(None,
+                  (_compute_fn(name, qualifier, minmax, value)
+                   for (name, qualifier, value) in test_settings))
+    if min_errs is None or len(errs) < len(min_errs):
+      min_errs = errs
+  assert min_errs is not None
+  return ret + min_errs
 
 
 def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
@@ -7421,6 +7416,7 @@ class LUInstanceStartup(LogicalUnit):
     """
     instance = self.instance
     force = self.op.force
+    reason = self.op.reason
 
     if not self.op.no_remember:
       self.cfg.MarkInstanceUp(instance.name)
@@ -7437,7 +7433,7 @@ class LUInstanceStartup(LogicalUnit):
         self.rpc.call_instance_start(node_current,
                                      (instance, self.op.hvparams,
                                       self.op.beparams),
-                                     self.op.startup_paused)
+                                     self.op.startup_paused, reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -7521,7 +7517,8 @@ class LUInstanceReboot(LogicalUnit):
     else:
       if instance_running:
         result = self.rpc.call_instance_shutdown(node_current, instance,
-                                                 self.op.shutdown_timeout)
+                                                 self.op.shutdown_timeout,
+                                                 reason)
         result.Raise("Could not shutdown instance for full reboot")
         _ShutdownInstanceDisks(self, instance)
       else:
@@ -7529,7 +7526,8 @@ class LUInstanceReboot(LogicalUnit):
                      instance.name)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current,
-                                            (instance, None, None), False)
+                                            (instance, None, None), False,
+                                             reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -7597,6 +7595,7 @@ class LUInstanceShutdown(LogicalUnit):
     instance = self.instance
     node_current = instance.primary_node
     timeout = self.op.timeout
+    reason = self.op.reason
 
     # If the instance is offline we shouldn't mark it as down, as that
     # resets the offline flag.
@@ -7607,7 +7606,8 @@ class LUInstanceShutdown(LogicalUnit):
       assert self.op.ignore_offline_nodes
       self.LogInfo("Primary node offline, marked instance as stopped")
     else:
-      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
+      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
+                                               reason)
       msg = result.fail_msg
       if msg:
         self.LogWarning("Could not shutdown instance: %s", msg)
@@ -8188,7 +8188,8 @@ class LUInstanceRemove(LogicalUnit):
                  instance.name, instance.primary_node)
 
     result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
-                                             self.op.shutdown_timeout)
+                                             self.op.shutdown_timeout,
+                                             self.op.reason)
     msg = result.fail_msg
     if msg:
       if self.op.ignore_failures:
@@ -8553,7 +8554,8 @@ class LUInstanceMove(LogicalUnit):
             self.owned_locks(locking.LEVEL_NODE_RES))
 
     result = self.rpc.call_instance_shutdown(source_node, instance,
-                                             self.op.shutdown_timeout)
+                                             self.op.shutdown_timeout,
+                                             self.op.reason)
     msg = result.fail_msg
     if msg:
       if self.op.ignore_consistency:
@@ -8624,7 +8626,8 @@ class LUInstanceMove(LogicalUnit):
         raise errors.OpExecError("Can't activate the instance's disks")
 
       result = self.rpc.call_instance_start(target_node,
-                                            (instance, None, None), False)
+                                            (instance, None, None), False,
+                                             self.op.reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -9326,7 +9329,8 @@ class TLMigrateInstance(Tasklet):
                  instance.name, source_node)
 
     result = self.rpc.call_instance_shutdown(source_node, instance,
-                                             self.shutdown_timeout)
+                                             self.shutdown_timeout,
+                                             self.lu.op.reason)
     msg = result.fail_msg
     if msg:
       if self.ignore_consistency or primary_node.offline:
@@ -9363,7 +9367,7 @@ class TLMigrateInstance(Tasklet):
       self.feedback_fn("* starting the instance on the target node %s" %
                        target_node)
       result = self.rpc.call_instance_start(target_node, (instance, None, None),
-                                            False)
+                                            False, self.lu.op.reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self.lu, instance)
@@ -9443,20 +9447,34 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
   @type excl_stor: boolean
   @param excl_stor: Whether exclusive_storage is active for the node
 
+  @return: list of created devices
   """
-  if device.CreateOnSecondary():
-    force_create = True
+  created_devices = []
+  try:
+    if device.CreateOnSecondary():
+      force_create = True
 
-  if device.children:
-    for child in device.children:
-      _CreateBlockDevInner(lu, node, instance, child, force_create,
-                           info, force_open, excl_stor)
+    if device.children:
+      for child in device.children:
+        devs = _CreateBlockDevInner(lu, node, instance, child, force_create,
+                                    info, force_open, excl_stor)
+        created_devices.extend(devs)
 
-  if not force_create:
-    return
+    if not force_create:
+      return created_devices
+
+    _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                          excl_stor)
+    # The device has been completely created, so there is no point in keeping
+    # its subdevices in the list. We just add the device itself instead.
+    created_devices = [(node, device)]
+    return created_devices
 
-  _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
-                        excl_stor)
+  except errors.DeviceCreationError, e:
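+    # Accumulate the devices created before the failure and re-raise, so
+    # that the caller can clean up the partially created disk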
+    e.created_devices.extend(created_devices)
+    raise e
+  except errors.OpExecError, e:
+    raise errors.DeviceCreationError(str(e), created_devices)
 
 
 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
@@ -9828,13 +9846,17 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
       except errors.OpExecError:
         logging.warning("Creating disk %s for instance '%s' failed",
                         idx, instance.name)
+      except errors.DeviceCreationError, e:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+        disks_created.extend(e.created_devices)
         for (node, disk) in disks_created:
           lu.cfg.SetDiskID(disk, node)
           result = lu.rpc.call_blockdev_remove(node, disk)
           if result.fail_msg:
             logging.warning("Failed to remove newly-created disk %s on node %s:"
                             " %s", device, node, result.fail_msg)
-        raise
+        raise errors.OpExecError(e.message)
 
 
 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
@@ -11295,7 +11317,7 @@ class LUInstanceCreate(LogicalUnit):
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
       result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
-                                            False)
+                                            False, self.op.reason)
       result.Raise("Could not start instance")
 
     return list(iobj.all_nodes)
@@ -14999,7 +15021,8 @@ class LUBackupExport(LogicalUnit):
       # shutdown the instance, but not the disks
       feedback_fn("Shutting down instance %s" % instance.name)
       result = self.rpc.call_instance_shutdown(src_node, instance,
-                                               self.op.shutdown_timeout)
+                                               self.op.shutdown_timeout,
+                                               self.op.reason)
       # TODO: Maybe ignore failures if ignore_remove_failures is set
       result.Raise("Could not shutdown instance %s on"
                    " node %s" % (instance.name, src_node))
@@ -15028,7 +15051,8 @@ class LUBackupExport(LogicalUnit):
           assert not activate_disks
           feedback_fn("Starting instance %s" % instance.name)
           result = self.rpc.call_instance_start(src_node,
-                                                (instance, None, None), False)
+                                                (instance, None, None), False,
+                                                 self.op.reason)
           msg = result.fail_msg
           if msg:
             feedback_fn("Failed to start instance: %s" % msg)
index 6c82991..b968fcf 100644 (file)
@@ -633,7 +633,8 @@ class ConfigWriter:
         result.append("%s has invalid instance policy: %s" % (owner, err))
       for key, value in ipolicy.items():
         if key == constants.ISPECS_MINMAX:
-          _helper_ispecs(owner, "ipolicy/" + key, value)
+          for k in range(len(value)):
+            _helper_ispecs(owner, "ipolicy/%s[%s]" % (key, k), value[k])
         elif key == constants.ISPECS_STD:
           _helper(owner, "ipolicy/" + key, value,
                   constants.ISPECS_PARAMETER_TYPES)
index 2f80c7b..5e29b10 100644 (file)
@@ -2215,7 +2215,7 @@ ISPECS_MINMAX_DEFAULTS = {
     },
   }
 IPOLICY_DEFAULTS = {
-  ISPECS_MINMAX: ISPECS_MINMAX_DEFAULTS,
+  ISPECS_MINMAX: [ISPECS_MINMAX_DEFAULTS],
   ISPECS_STD: {
     ISPEC_MEM_SIZE: 128,
     ISPEC_CPU_COUNT: 1,
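
For orientation, the ISPECS_MINMAX entry of an instance policy is now a list
of min/max pairs rather than a single pair. A sketch of the new shape (the
values are purely illustrative, and a complete cluster-level policy lists
every spec parameter in each dict):

  minmax = [
    {
      ISPECS_MIN: {ISPEC_MEM_SIZE: 128, ISPEC_CPU_COUNT: 1},
      ISPECS_MAX: {ISPEC_MEM_SIZE: 4096, ISPEC_CPU_COUNT: 8},
    },
    # further min/max pairs may be appended to the list
  ]
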
index 3a264c3..22b7502 100644 (file)
@@ -209,6 +209,22 @@ class OpResultError(GenericError):
   """
 
 
+class DeviceCreationError(GenericError):
+  """Error during the creation of a device.
+
+  This exception should contain the list of the devices actually created
+  up to now, in the form of pairs (node, device)
+
+  """
+  def __init__(self, message, created_devices):
+    GenericError.__init__(self)
+    self.message = message
+    self.created_devices = created_devices
+
+  def __str__(self):
+    return self.message
+
+
 class OpCodeUnknown(GenericError):
   """Unknown opcode submitted.
 
diff --git a/lib/hooksmaster.py b/lib/hooksmaster.py
new file mode 100644 (file)
index 0000000..9f40087
--- /dev/null
@@ -0,0 +1,270 @@
+#
+#
+
+# Copyright (C) 2006, 2007, 2011, 2012 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Module implementing the logic for running hooks.
+
+"""
+
+from ganeti import constants
+from ganeti import errors
+from ganeti import utils
+from ganeti import compat
+from ganeti import pathutils
+
+
+def _RpcResultsToHooksResults(rpc_results):
+  """Function to convert RPC results to the format expected by HooksMaster.
+
+  @type rpc_results: dict(node: L{rpc.RpcResult})
+  @param rpc_results: RPC results
+  @rtype: dict(node: (fail_msg, offline, hooks_results))
+  @return: RPC results unpacked according to the format expected by
+    L{hooksmaster.HooksMaster}
+
+  """
+  return dict((node, (rpc_res.fail_msg, rpc_res.offline, rpc_res.payload))
+              for (node, rpc_res) in rpc_results.items())
+
+
+class HooksMaster(object):
+  def __init__(self, opcode, hooks_path, nodes, hooks_execution_fn,
+               hooks_results_adapt_fn, build_env_fn, log_fn, htype=None,
+               cluster_name=None, master_name=None):
+    """Base class for hooks masters.
+
+    This class invokes the execution of hooks according to the behaviour
+    specified by its parameters.
+
+    @type opcode: string
+    @param opcode: opcode of the operation to which the hooks are tied
+    @type hooks_path: string
+    @param hooks_path: prefix of the hooks directories
+    @type nodes: 2-tuple of lists
+    @param nodes: 2-tuple of lists containing nodes on which pre-hooks must be
+      run and nodes on which post-hooks must be run
+    @type hooks_execution_fn: function that accepts the following parameters:
+      (node_list, hooks_path, phase, environment)
+    @param hooks_execution_fn: function that will execute the hooks
+    @type hooks_results_adapt_fn: function
+    @param hooks_results_adapt_fn: function that will adapt the return value
+      of hooks_execution_fn to the format expected by RunPhase; can be None,
+      indicating that no conversion is necessary
+    @type build_env_fn: function that returns a dictionary having strings as
+      keys
+    @param build_env_fn: function that builds the environment for the hooks
+    @type log_fn: function that accepts a string
+    @param log_fn: logging function
+    @type htype: string or None
+    @param htype: None or one of L{constants.HTYPE_CLUSTER},
+     L{constants.HTYPE_NODE}, L{constants.HTYPE_INSTANCE}
+    @type cluster_name: string
+    @param cluster_name: name of the cluster
+    @type master_name: string
+    @param master_name: name of the master
+
+    """
+    self.opcode = opcode
+    self.hooks_path = hooks_path
+    self.hooks_execution_fn = hooks_execution_fn
+    self.hooks_results_adapt_fn = hooks_results_adapt_fn
+    self.build_env_fn = build_env_fn
+    self.log_fn = log_fn
+    self.htype = htype
+    self.cluster_name = cluster_name
+    self.master_name = master_name
+
+    self.pre_env = self._BuildEnv(constants.HOOKS_PHASE_PRE)
+    (self.pre_nodes, self.post_nodes) = nodes
+
+  def _BuildEnv(self, phase):
+    """Compute the environment and the target nodes.
+
+    Based on the opcode and the current node list, this builds the
+    environment for the hooks and the target node list for the run.
+
+    """
+    if phase == constants.HOOKS_PHASE_PRE:
+      prefix = "GANETI_"
+    elif phase == constants.HOOKS_PHASE_POST:
+      prefix = "GANETI_POST_"
+    else:
+      raise AssertionError("Unknown phase '%s'" % phase)
+
+    env = {}
+
+    if self.hooks_path is not None:
+      phase_env = self.build_env_fn()
+      if phase_env:
+        assert not compat.any(key.upper().startswith(prefix)
+                              for key in phase_env)
+        env.update(("%s%s" % (prefix, key), value)
+                   for (key, value) in phase_env.items())
+
+    if phase == constants.HOOKS_PHASE_PRE:
+      assert compat.all((key.startswith("GANETI_") and
+                         not key.startswith("GANETI_POST_"))
+                        for key in env)
+
+    elif phase == constants.HOOKS_PHASE_POST:
+      assert compat.all(key.startswith("GANETI_POST_") for key in env)
+      assert isinstance(self.pre_env, dict)
+
+      # Merge with pre-phase environment
+      assert not compat.any(key.startswith("GANETI_POST_")
+                            for key in self.pre_env)
+      env.update(self.pre_env)
+    else:
+      raise AssertionError("Unknown phase '%s'" % phase)
+
+    return env
+
+  def _RunWrapper(self, node_list, hpath, phase, phase_env):
+    """Simple wrapper over self.callfn.
+
+    This method fixes the environment before executing the hooks.
+
+    """
+    env = {
+      "PATH": constants.HOOKS_PATH,
+      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
+      "GANETI_OP_CODE": self.opcode,
+      "GANETI_DATA_DIR": pathutils.DATA_DIR,
+      "GANETI_HOOKS_PHASE": phase,
+      "GANETI_HOOKS_PATH": hpath,
+      }
+
+    if self.htype:
+      env["GANETI_OBJECT_TYPE"] = self.htype
+
+    if self.cluster_name is not None:
+      env["GANETI_CLUSTER"] = self.cluster_name
+
+    if self.master_name is not None:
+      env["GANETI_MASTER"] = self.master_name
+
+    if phase_env:
+      env = utils.algo.JoinDisjointDicts(env, phase_env)
+
+    # Convert everything to strings
+    env = dict([(str(key), str(val)) for key, val in env.iteritems()])
+
+    assert compat.all(key == "PATH" or key.startswith("GANETI_")
+                      for key in env)
+
+    return self.hooks_execution_fn(node_list, hpath, phase, env)
+
+  def RunPhase(self, phase, nodes=None):
+    """Run all the scripts for a phase.
+
+    This is the main function of the HooksMaster.
+    It executes self.hooks_execution_fn, and after running
+    self.hooks_results_adapt_fn on its results it expects them to be in the
+    form {node_name: (fail_msg, offline, [(script, result, output), ...])}.
+
+    @param phase: one of L{constants.HOOKS_PHASE_POST} or
+        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
+    @param nodes: overrides the predefined list of nodes for the given phase
+    @return: the processed results of the hooks multi-node rpc call
+    @raise errors.HooksFailure: on communication failure to the nodes
+    @raise errors.HooksAbort: on failure of one of the hooks
+
+    """
+    if phase == constants.HOOKS_PHASE_PRE:
+      if nodes is None:
+        nodes = self.pre_nodes
+      env = self.pre_env
+    elif phase == constants.HOOKS_PHASE_POST:
+      if nodes is None:
+        nodes = self.post_nodes
+      env = self._BuildEnv(phase)
+    else:
+      raise AssertionError("Unknown phase '%s'" % phase)
+
+    if not nodes:
+      # empty node list, we should not attempt to run this as either
+      # we're in the cluster init phase and the rpc client part can't
+      # even attempt to run, or this LU doesn't do hooks at all
+      return
+
+    results = self._RunWrapper(nodes, self.hooks_path, phase, env)
+    if not results:
+      msg = "Communication Failure"
+      if phase == constants.HOOKS_PHASE_PRE:
+        raise errors.HooksFailure(msg)
+      else:
+        self.log_fn(msg)
+        return results
+
+    converted_res = results
+    if self.hooks_results_adapt_fn:
+      converted_res = self.hooks_results_adapt_fn(results)
+
+    errs = []
+    for node_name, (fail_msg, offline, hooks_results) in converted_res.items():
+      if offline:
+        continue
+
+      if fail_msg:
+        self.log_fn("Communication failure to node %s: %s", node_name, fail_msg)
+        continue
+
+      for script, hkr, output in hooks_results:
+        if hkr == constants.HKR_FAIL:
+          if phase == constants.HOOKS_PHASE_PRE:
+            errs.append((node_name, script, output))
+          else:
+            if not output:
+              output = "(no output)"
+            self.log_fn("On %s script %s failed, output: %s" %
+                        (node_name, script, output))
+
+    if errs and phase == constants.HOOKS_PHASE_PRE:
+      raise errors.HooksAbort(errs)
+
+    return results
+
+  def RunConfigUpdate(self):
+    """Run the special configuration update hook
+
+    This is a special hook that runs only on the master after each
+    top-level LU if the configuration has been updated.
+
+    """
+    phase = constants.HOOKS_PHASE_POST
+    hpath = constants.HOOKS_NAME_CFGUPDATE
+    nodes = [self.master_name]
+    self._RunWrapper(nodes, hpath, phase, self.pre_env)
+
+  @staticmethod
+  def BuildFromLu(hooks_execution_fn, lu):
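+    """Builds a HooksMaster object from a logical unit.
+
+    The resulting object uses the LU's opcode, hooks path, environment
+    builder and logging callback, and runs hooks on the node lists
+    returned by lu.BuildHooksNodes().
+
+    """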
+    if lu.HPATH is None:
+      nodes = (None, None)
+    else:
+      nodes = map(frozenset, lu.BuildHooksNodes())
+
+    master_name = cluster_name = None
+    if lu.cfg:
+      master_name = lu.cfg.GetMasterNode()
+      cluster_name = lu.cfg.GetClusterName()
+
+    return HooksMaster(lu.op.OP_ID, lu.HPATH, nodes, hooks_execution_fn,
+                       _RpcResultsToHooksResults, lu.BuildHooksEnv,
+                       lu.LogWarning, lu.HTYPE, cluster_name, master_name)
index 594e16e..4498747 100644 (file)
@@ -38,11 +38,11 @@ import traceback
 from ganeti import opcodes
 from ganeti import constants
 from ganeti import errors
+from ganeti import hooksmaster
 from ganeti import cmdlib
 from ganeti import locking
 from ganeti import utils
 from ganeti import compat
-from ganeti import pathutils
 
 
 _OP_PREFIX = "Op"
@@ -245,20 +245,6 @@ def _FailingSubmitManyJobs(_):
                                " queries) can not submit jobs")
 
 
-def _RpcResultsToHooksResults(rpc_results):
-  """Function to convert RPC results to the format expected by HooksMaster.
-
-  @type rpc_results: dict(node: L{rpc.RpcResult})
-  @param rpc_results: RPC results
-  @rtype: dict(node: (fail_msg, offline, hooks_results))
-  @return: RPC results unpacked according to the format expected by
-    L({mcpu.HooksMaster}
-
-  """
-  return dict((node, (rpc_res.fail_msg, rpc_res.offline, rpc_res.payload))
-              for (node, rpc_res) in rpc_results.items())
-
-
 def _VerifyLocks(lu, glm, _mode_whitelist=_NODE_ALLOC_MODE_WHITELIST,
                  _nal_whitelist=_NODE_ALLOC_WHITELIST):
   """Performs consistency checks on locks acquired by a logical unit.
@@ -314,7 +300,7 @@ class Processor(object):
     self._ec_id = ec_id
     self._cbs = None
     self.rpc = context.rpc
-    self.hmclass = HooksMaster
+    self.hmclass = hooksmaster.HooksMaster
     self._enable_locks = enable_locks
 
   def _CheckLocksEnabled(self):
@@ -603,229 +589,3 @@ class Processor(object):
       raise errors.ProgrammerError("Tried to use execution context id when"
                                    " not set")
     return self._ec_id
-
-
-class HooksMaster(object):
-  def __init__(self, opcode, hooks_path, nodes, hooks_execution_fn,
-               hooks_results_adapt_fn, build_env_fn, log_fn, htype=None,
-               cluster_name=None, master_name=None):
-    """Base class for hooks masters.
-
-    This class invokes the execution of hooks according to the behaviour
-    specified by its parameters.
-
-    @type opcode: string
-    @param opcode: opcode of the operation to which the hooks are tied
-    @type hooks_path: string
-    @param hooks_path: prefix of the hooks directories
-    @type nodes: 2-tuple of lists
-    @param nodes: 2-tuple of lists containing nodes on which pre-hooks must be
-      run and nodes on which post-hooks must be run
-    @type hooks_execution_fn: function that accepts the following parameters:
-      (node_list, hooks_path, phase, environment)
-    @param hooks_execution_fn: function that will execute the hooks; can be
-      None, indicating that no conversion is necessary.
-    @type hooks_results_adapt_fn: function
-    @param hooks_results_adapt_fn: function that will adapt the return value of
-      hooks_execution_fn to the format expected by RunPhase
-    @type build_env_fn: function that returns a dictionary having strings as
-      keys
-    @param build_env_fn: function that builds the environment for the hooks
-    @type log_fn: function that accepts a string
-    @param log_fn: logging function
-    @type htype: string or None
-    @param htype: None or one of L{constants.HTYPE_CLUSTER},
-     L{constants.HTYPE_NODE}, L{constants.HTYPE_INSTANCE}
-    @type cluster_name: string
-    @param cluster_name: name of the cluster
-    @type master_name: string
-    @param master_name: name of the master
-
-    """
-    self.opcode = opcode
-    self.hooks_path = hooks_path
-    self.hooks_execution_fn = hooks_execution_fn
-    self.hooks_results_adapt_fn = hooks_results_adapt_fn
-    self.build_env_fn = build_env_fn
-    self.log_fn = log_fn
-    self.htype = htype
-    self.cluster_name = cluster_name
-    self.master_name = master_name
-
-    self.pre_env = self._BuildEnv(constants.HOOKS_PHASE_PRE)
-    (self.pre_nodes, self.post_nodes) = nodes
-
-  def _BuildEnv(self, phase):
-    """Compute the environment and the target nodes.
-
-    Based on the opcode and the current node list, this builds the
-    environment for the hooks and the target node list for the run.
-
-    """
-    if phase == constants.HOOKS_PHASE_PRE:
-      prefix = "GANETI_"
-    elif phase == constants.HOOKS_PHASE_POST:
-      prefix = "GANETI_POST_"
-    else:
-      raise AssertionError("Unknown phase '%s'" % phase)
-
-    env = {}
-
-    if self.hooks_path is not None:
-      phase_env = self.build_env_fn()
-      if phase_env:
-        assert not compat.any(key.upper().startswith(prefix)
-                              for key in phase_env)
-        env.update(("%s%s" % (prefix, key), value)
-                   for (key, value) in phase_env.items())
-
-    if phase == constants.HOOKS_PHASE_PRE:
-      assert compat.all((key.startswith("GANETI_") and
-                         not key.startswith("GANETI_POST_"))
-                        for key in env)
-
-    elif phase == constants.HOOKS_PHASE_POST:
-      assert compat.all(key.startswith("GANETI_POST_") for key in env)
-      assert isinstance(self.pre_env, dict)
-
-      # Merge with pre-phase environment
-      assert not compat.any(key.startswith("GANETI_POST_")
-                            for key in self.pre_env)
-      env.update(self.pre_env)
-    else:
-      raise AssertionError("Unknown phase '%s'" % phase)
-
-    return env
-
-  def _RunWrapper(self, node_list, hpath, phase, phase_env):
-    """Simple wrapper over self.callfn.
-
-    This method fixes the environment before executing the hooks.
-
-    """
-    env = {
-      "PATH": constants.HOOKS_PATH,
-      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
-      "GANETI_OP_CODE": self.opcode,
-      "GANETI_DATA_DIR": pathutils.DATA_DIR,
-      "GANETI_HOOKS_PHASE": phase,
-      "GANETI_HOOKS_PATH": hpath,
-      }
-
-    if self.htype:
-      env["GANETI_OBJECT_TYPE"] = self.htype
-
-    if self.cluster_name is not None:
-      env["GANETI_CLUSTER"] = self.cluster_name
-
-    if self.master_name is not None:
-      env["GANETI_MASTER"] = self.master_name
-
-    if phase_env:
-      env = utils.algo.JoinDisjointDicts(env, phase_env)
-
-    # Convert everything to strings
-    env = dict([(str(key), str(val)) for key, val in env.iteritems()])
-
-    assert compat.all(key == "PATH" or key.startswith("GANETI_")
-                      for key in env)
-
-    return self.hooks_execution_fn(node_list, hpath, phase, env)
-
-  def RunPhase(self, phase, nodes=None):
-    """Run all the scripts for a phase.
-
-    This is the main function of the HookMaster.
-    It executes self.hooks_execution_fn, and after running
-    self.hooks_results_adapt_fn on its results it expects them to be in the form
-    {node_name: (fail_msg, [(script, result, output), ...]}).
-
-    @param phase: one of L{constants.HOOKS_PHASE_POST} or
-        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
-    @param nodes: overrides the predefined list of nodes for the given phase
-    @return: the processed results of the hooks multi-node rpc call
-    @raise errors.HooksFailure: on communication failure to the nodes
-    @raise errors.HooksAbort: on failure of one of the hooks
-
-    """
-    if phase == constants.HOOKS_PHASE_PRE:
-      if nodes is None:
-        nodes = self.pre_nodes
-      env = self.pre_env
-    elif phase == constants.HOOKS_PHASE_POST:
-      if nodes is None:
-        nodes = self.post_nodes
-      env = self._BuildEnv(phase)
-    else:
-      raise AssertionError("Unknown phase '%s'" % phase)
-
-    if not nodes:
-      # empty node list, we should not attempt to run this as either
-      # we're in the cluster init phase and the rpc client part can't
-      # even attempt to run, or this LU doesn't do hooks at all
-      return
-
-    results = self._RunWrapper(nodes, self.hooks_path, phase, env)
-    if not results:
-      msg = "Communication Failure"
-      if phase == constants.HOOKS_PHASE_PRE:
-        raise errors.HooksFailure(msg)
-      else:
-        self.log_fn(msg)
-        return results
-
-    converted_res = results
-    if self.hooks_results_adapt_fn:
-      converted_res = self.hooks_results_adapt_fn(results)
-
-    errs = []
-    for node_name, (fail_msg, offline, hooks_results) in converted_res.items():
-      if offline:
-        continue
-
-      if fail_msg:
-        self.log_fn("Communication failure to node %s: %s", node_name, fail_msg)
-        continue
-
-      for script, hkr, output in hooks_results:
-        if hkr == constants.HKR_FAIL:
-          if phase == constants.HOOKS_PHASE_PRE:
-            errs.append((node_name, script, output))
-          else:
-            if not output:
-              output = "(no output)"
-            self.log_fn("On %s script %s failed, output: %s" %
-                        (node_name, script, output))
-
-    if errs and phase == constants.HOOKS_PHASE_PRE:
-      raise errors.HooksAbort(errs)
-
-    return results
-
-  def RunConfigUpdate(self):
-    """Run the special configuration update hook
-
-    This is a special hook that runs only on the master after each
-    top-level LI if the configuration has been updated.
-
-    """
-    phase = constants.HOOKS_PHASE_POST
-    hpath = constants.HOOKS_NAME_CFGUPDATE
-    nodes = [self.master_name]
-    self._RunWrapper(nodes, hpath, phase, self.pre_env)
-
-  @staticmethod
-  def BuildFromLu(hooks_execution_fn, lu):
-    if lu.HPATH is None:
-      nodes = (None, None)
-    else:
-      nodes = map(frozenset, lu.BuildHooksNodes())
-
-    master_name = cluster_name = None
-    if lu.cfg:
-      master_name = lu.cfg.GetMasterNode()
-      cluster_name = lu.cfg.GetClusterName()
-
-    return HooksMaster(lu.op.OP_ID, lu.HPATH, nodes, hooks_execution_fn,
-                       _RpcResultsToHooksResults, lu.BuildHooksEnv,
-                       lu.LogWarning, lu.HTYPE, cluster_name, master_name)
index 9be79ac..8d809c4 100644 (file)
@@ -82,35 +82,17 @@ def FillDict(defaults_dict, custom_dict, skip_keys=None):
   return ret_dict
 
 
-def _FillMinMaxISpecs(default_specs, custom_specs):
-  assert frozenset(default_specs.keys()) == constants.ISPECS_MINMAX_KEYS
-  ret_specs = {}
-  for key in constants.ISPECS_MINMAX_KEYS:
-    ret_specs[key] = FillDict(default_specs[key],
-                              custom_specs.get(key, {}))
-  return ret_specs
-
-
 def FillIPolicy(default_ipolicy, custom_ipolicy):
   """Fills an instance policy with defaults.
 
   """
   assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
-  ret_dict = {}
-  # Instance specs
-  new_mm = _FillMinMaxISpecs(default_ipolicy[constants.ISPECS_MINMAX],
-                             custom_ipolicy.get(constants.ISPECS_MINMAX, {}))
-  ret_dict[constants.ISPECS_MINMAX] = new_mm
-  new_std = FillDict(default_ipolicy[constants.ISPECS_STD],
-                     custom_ipolicy.get(constants.ISPECS_STD, {}))
-  ret_dict[constants.ISPECS_STD] = new_std
-  # list items
-  for key in [constants.IPOLICY_DTS]:
-    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
-  # other items which we know we can directly copy (immutables)
-  for key in constants.IPOLICY_PARAMETERS:
-    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
-
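+  # Every key of the override other than the std spec (in particular the
+  # minmax specs list) is either taken as a whole or inherited as a whole
+  # from the defaults; only the std spec is merged key by key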
+  ret_dict = copy.deepcopy(custom_ipolicy)
+  for key in default_ipolicy:
+    if key not in ret_dict:
+      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
+    elif key == constants.ISPECS_STD:
+      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
   return ret_dict
 
 
@@ -198,13 +180,7 @@ def MakeEmptyIPolicy():
   """Create empty IPolicy dictionary.
 
   """
-  return {
-    constants.ISPECS_MINMAX: {
-      constants.ISPECS_MIN: {},
-      constants.ISPECS_MAX: {},
-      },
-    constants.ISPECS_STD: {},
-    }
+  return {}
 
 
 class ConfigObject(outils.ValidatedSlots):
@@ -942,14 +918,7 @@ class InstancePolicy(ConfigObject):
     @raise errors.ConfigurationError: when the policy is not legal
 
     """
-    if constants.ISPECS_MINMAX in ipolicy:
-      if check_std and constants.ISPECS_STD not in ipolicy:
-        msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
-        raise errors.ConfigurationError(msg)
-      minmaxspecs = ipolicy[constants.ISPECS_MINMAX]
-      stdspec = ipolicy.get(constants.ISPECS_STD)
-      for param in constants.ISPECS_PARAMETERS:
-        InstancePolicy.CheckISpecSyntax(minmaxspecs, stdspec, param, check_std)
+    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
     if constants.IPOLICY_DTS in ipolicy:
       InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
     for key in constants.IPOLICY_PARAMETERS:
@@ -961,7 +930,57 @@ class InstancePolicy(ConfigObject):
                                       utils.CommaJoin(wrong_keys))
 
   @classmethod
-  def CheckISpecSyntax(cls, minmaxspecs, stdspec, name, check_std):
+  def _CheckIncompleteSpec(cls, spec, keyname):
+    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
+    if missing_params:
+      msg = ("Missing instance specs parameters for %s: %s" %
+             (keyname, utils.CommaJoin(missing_params)))
+      raise errors.ConfigurationError(msg)
+
+  @classmethod
+  def CheckISpecSyntax(cls, ipolicy, check_std):
+    """Check the instance policy specs for validity.
+
+    @type ipolicy: dict
+    @param ipolicy: dictionary with min/max/std specs
+    @type check_std: bool
+    @param check_std: Whether to check std value or just assume compliance
+    @raise errors.ConfigurationError: when specs are not valid
+
+    """
+    if constants.ISPECS_MINMAX not in ipolicy:
+      # Nothing to check
+      return
+
+    if check_std and constants.ISPECS_STD not in ipolicy:
+      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
+      raise errors.ConfigurationError(msg)
+    stdspec = ipolicy.get(constants.ISPECS_STD)
+    if check_std:
+      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
+
+    if not ipolicy[constants.ISPECS_MINMAX]:
+      raise errors.ConfigurationError("Empty minmax specifications")
+    std_is_good = False
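+    # The std spec only has to fit within at least one of the min/max pairs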
+    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
+      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
+      if missing:
+        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
+        raise errors.ConfigurationError(msg)
+      for (key, spec) in minmaxspecs.items():
+        InstancePolicy._CheckIncompleteSpec(spec, key)
+
+      spec_std_ok = True
+      for param in constants.ISPECS_PARAMETERS:
+        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
+                                                           param, check_std)
+        spec_std_ok = spec_std_ok and par_std_ok
+      std_is_good = std_is_good or spec_std_ok
+    if not std_is_good:
+      raise errors.ConfigurationError("Invalid std specifications")
+
+  @classmethod
+  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
     """Check the instance policy specs for validity on a given key.
 
     We check if the instance specs makes sense for a given key, that is
@@ -975,34 +994,27 @@ class InstancePolicy(ConfigObject):
     @param name: what are the limits for
     @type check_std: bool
     @param check_std: Whether to check std value or just assume compliance
-    @raise errors.ConfigurationError: when specs for the given name are not
-        valid
+    @rtype: bool
+    @return: C{True} when specs are valid, C{False} when standard spec for the
+        given name is not valid
+    @raise errors.ConfigurationError: when min/max specs for the given name
+        are not valid
 
     """
-    missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
-    if missing:
-      msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
-      raise errors.ConfigurationError(msg)
-
     minspec = minmaxspecs[constants.ISPECS_MIN]
     maxspec = minmaxspecs[constants.ISPECS_MAX]
-    min_v = minspec.get(name, 0)
+    min_v = minspec[name]
+    max_v = maxspec[name]
 
-    if check_std:
+    if min_v > max_v:
+      err = ("Invalid specification of min/max values for %s: %s/%s" %
+             (name, min_v, max_v))
+      raise errors.ConfigurationError(err)
+    elif check_std:
       std_v = stdspec.get(name, min_v)
-      std_msg = std_v
+      return std_v >= min_v and std_v <= max_v
     else:
-      std_v = min_v
-      std_msg = "-"
-
-    max_v = maxspec.get(name, std_v)
-    if min_v > std_v or std_v > max_v:
-      err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
-             (name,
-              minspec.get(name, "-"),
-              maxspec.get(name, "-"),
-              std_msg))
-      raise errors.ConfigurationError(err)
+      return True
 
   @classmethod
   def CheckDiskTemplates(cls, disk_templates):
index ace4d1e..eb3c21e 100644 (file)
@@ -1031,7 +1031,7 @@ class GanetiRapiClient(object): # pylint: disable=R0904
                               (GANETI_RAPI_VERSION, instance)), query, None)
 
   def ShutdownInstance(self, instance, dry_run=False, no_remember=False,
-                       **kwargs):
+                       reason=None, **kwargs):
     """Shuts down an instance.
 
     @type instance: str
@@ -1040,6 +1040,8 @@ class GanetiRapiClient(object): # pylint: disable=R0904
     @param dry_run: whether to perform a dry run
     @type no_remember: bool
     @param no_remember: if true, will not record the state change
+    @type reason: string
+    @param reason: the reason for the shutdown
     @rtype: string
     @return: job id
 
@@ -1049,12 +1051,14 @@ class GanetiRapiClient(object): # pylint: disable=R0904
 
     _AppendDryRunIf(query, dry_run)
     _AppendIf(query, no_remember, ("no_remember", 1))
+    _AppendIf(query, reason, ("reason", reason))
 
     return self._SendRequest(HTTP_PUT,
                              ("/%s/instances/%s/shutdown" %
                               (GANETI_RAPI_VERSION, instance)), query, body)
 
-  def StartupInstance(self, instance, dry_run=False, no_remember=False):
+  def StartupInstance(self, instance, dry_run=False, no_remember=False,
+                      reason=None):
     """Starts up an instance.
 
     @type instance: str
@@ -1063,6 +1067,8 @@ class GanetiRapiClient(object): # pylint: disable=R0904
     @param dry_run: whether to perform a dry run
     @type no_remember: bool
     @param no_remember: if true, will not record the state change
+    @type reason: string
+    @param reason: the reason for the startup
     @rtype: string
     @return: job id
 
@@ -1070,6 +1076,7 @@ class GanetiRapiClient(object): # pylint: disable=R0904
     query = []
     _AppendDryRunIf(query, dry_run)
     _AppendIf(query, no_remember, ("no_remember", 1))
+    _AppendIf(query, reason, ("reason", reason))
 
     return self._SendRequest(HTTP_PUT,
                              ("/%s/instances/%s/startup" %
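
A usage sketch for the new "reason" parameter (illustrative only: the import
path is the in-tree module, while the host and instance names are made up):

  from ganeti.rapi import client as rapi_client

  cl = rapi_client.GanetiRapiClient("cluster.example.com")
  # Both StartupInstance and ShutdownInstance return a job id; the reason
  # string is sent as a query argument and ends up in the reason trail
  job_id = cl.StartupInstance("instance1.example.com",
                              reason="scheduled maintenance")
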
index bf3cbe8..c6f9a09 100644 (file)
@@ -235,6 +235,7 @@ _INSTANCE_CALLS = [
   ("instance_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
     ("instance", ED_INST_DICT, "Instance object"),
     ("timeout", None, None),
+    ("reason", None, "The reason for the shutdown"),
     ], None, None, "Stops an instance"),
   ("instance_balloon_memory", SINGLE, None, constants.RPC_TMO_NORMAL, [
     ("instance", ED_INST_DICT, "Instance object"),
@@ -278,6 +279,7 @@ _INSTANCE_CALLS = [
   ("instance_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
     ("instance_hvp_bep", ED_INST_DICT_HVP_BEP_DP, None),
     ("startup_paused", None, None),
+    ("reason", None, "The reason for the startup"),
     ], None, None, "Starts an instance"),
   ("instance_os_add", SINGLE, None, constants.RPC_TMO_1DAY, [
     ("instance_osp", ED_INST_DICT_OSP_DP, None),
index c81352b..a7d7a88 100644 (file)
@@ -589,16 +589,19 @@ class NodeRequestHandler(http.server.HttpServerHandler):
     """
     instance = objects.Instance.FromDict(params[0])
     timeout = params[1]
-    return backend.InstanceShutdown(instance, timeout)
+    trail = params[2]
+    _extendReasonTrail(trail, "shutdown")
+    return backend.InstanceShutdown(instance, timeout, trail)
 
   @staticmethod
   def perspective_instance_start(params):
     """Start an instance.
 
     """
-    (instance_name, startup_paused) = params
+    (instance_name, startup_paused, trail) = params
     instance = objects.Instance.FromDict(instance_name)
-    return backend.StartInstance(instance, startup_paused)
+    _extendReasonTrail(trail, "start")
+    return backend.StartInstance(instance, startup_paused, trail)
 
   @staticmethod
   def perspective_migration_info(params):
index e75a9aa..4458e2b 100644 (file)
@@ -152,6 +152,14 @@ Passing the ``--roman`` option gnt-cluster info will try to print
 its integer fields in a latin friendly way. This allows further
 diffusion of Ganeti among ancient cultures.
 
+SHOW-ISPECS-CMD
+~~~~~~~~~~~~~~~
+
+**show-ispecs-cmd**
+
+Shows the command line that can be used to recreate the cluster with the
+same instance policy spec options.
+
 INIT
 ~~~~
 
@@ -182,7 +190,11 @@ INIT
 | [\--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
 | [\--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
 | [\--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [\--ipolicy-std-specs *spec*=*value* [,*spec*=*value*...]]
+| [\--ipolicy-bounds-specs *bounds_ispecs*]
 | [\--ipolicy-disk-templates *template* [,*template*...]]
+| [\--ipolicy-spindle-ratio *ratio*]
+| [\--ipolicy-vcpu-ratio *ratio*]
 | [\--disk-state *diskstate*]
 | [\--hypervisor-state *hvstate*]
 | [\--drbd-usermode-helper *helper*]
@@ -218,7 +230,11 @@ The ``--vg-name`` option will let you specify a volume group
 different than "xenvg" for Ganeti to use when creating instance
 disks. This volume group must have the same name on all nodes. Once
 the cluster is initialized this can be altered by using the
-**modify** command. If you don't want to use lvm storage at all use
+**modify** command. Note that if the volume group name is modified after
+the cluster creation and DRBD support is enabled, you might have to
+manually modify the metavg as well.
+
+If you don't want to use lvm storage at all use
 the ``--enabled-disk-template`` option to restrict the set of enabled
 disk templates. Once the cluster is initialized
 you can change this setup with the **modify** command.
@@ -484,14 +500,26 @@ The ``-C (--candidate-pool-size)`` option specifies the
 that the master will try to keep as master\_candidates. For more
 details about this role and other node roles, see the **ganeti**\(7).
 
-The ``--specs-...`` and ``--ipolicy-disk-templates`` options specify
-instance policy on the cluster. For the ``--specs-...`` options, each
-option can have three values: ``min``, ``max`` and ``std``, which can
-also be modified on group level (except for ``std``, which is defined
-once for the entire cluster). Please note, that ``std`` values are not
-the same as defaults set by ``--beparams``, but they are used for the
-capacity calculations. The ``--ipolicy-disk-templates`` option takes a
-comma-separated list of disk templates.
+The ``--specs-...`` and ``--ipolicy-...`` options specify the instance
+policy on the cluster. The ``--ipolicy-bounds-specs`` option sets the
+minimum and maximum specifications for instances. The format is:
+min:*param*=*value*,.../max:*param*=*value*,... and further
+specification pairs can be added by using ``//`` as a separator. The
+``--ipolicy-std-specs`` option takes a list of parameter/value pairs.
+For both options, *param* can be:
+
+- ``cpu-count``: number of VCPUs for an instance
+- ``disk-count``: number of disks for an instance
+- ``disk-size``: size of each disk
+- ``memory-size``: instance memory
+- ``nic-count``: number of network interfaces
+- ``spindle-use``: spindle usage for an instance
+
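+For example (the values here are purely illustrative), a single bounds
+specification could be given as
+``min:memory-size=128,cpu-count=1/max:memory-size=4096,cpu-count=8``.
+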
+For the ``--specs-...`` options, each option can have three values:
+``min``, ``max`` and ``std``, which can also be modified on group level
+(except for ``std``, which is defined once for the entire cluster).
+Please note, that ``std`` values are not the same as defaults set by
+``--beparams``, but they are used for the capacity calculations.
 
 - ``--specs-cpu-count`` limits the number of VCPUs that can be used by an
   instance.
@@ -499,7 +527,18 @@ comma-separated list of disk templates.
 - ``--specs-disk-size`` limits the disk size for every disk used
 - ``--specs-mem-size`` limits the amount of memory available
 - ``--specs-nic-count`` sets limits on the number of NICs used
+
+The ``--ipolicy-spindle-ratio`` and ``--ipolicy-vcpu-ratio`` options
+take a decimal number. The ``--ipolicy-disk-templates`` option takes a
+comma-separated list of disk templates.
+
 - ``--ipolicy-disk-templates`` limits the allowed disk templates
+- ``--ipolicy-spindle-ratio`` limits the instances-spindles ratio
+- ``--ipolicy-vcpu-ratio`` limits the vcpu-cpu ratio
+
+All the instance policy elements can be overridden at group level. Group
+level overrides can be removed by specifying ``default`` as the value of
+an item.
 
 The ``--drbd-usermode-helper`` option can be used to specify a usermode
 helper. Check that this string is the one used by the DRBD kernel.
@@ -574,12 +613,11 @@ MODIFY
 | [\--use-external-mip-script {yes \| no}]
 | [\--hypervisor-state *hvstate*]
 | [\--disk-state *diskstate*]
-| [\--specs-cpu-count *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-disk-count *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [\--ipolicy-std-specs *spec*=*value* [,*spec*=*value*...]]
+| [\--ipolicy-bounds-specs *bounds_ispecs*]
 | [\--ipolicy-disk-templates *template* [,*template*...]]
+| [\--ipolicy-spindle-ratio *ratio*]
+| [\--ipolicy-vcpu-ratio *ratio*]
 | [\--enabled-disk-templates *template* [,*template*...]]
 | [\--drbd-usermode-helper *helper*]
 
@@ -617,8 +655,7 @@ The ``-I (--default-iallocator)`` is described in the **init**
 command. To clear the default iallocator, just pass an empty string
 ('').
 
-The ``--specs-...`` and ``--ipolicy-disk-templates`` options are
-described in the **init** command.
+The ``--ipolicy-...`` options are described in the **init** command.
 
 See **ganeti**\(7) for a description of ``--submit`` and other common
 options.
@@ -693,24 +730,24 @@ RENEW-CRYPTO
 This command will stop all Ganeti daemons in the cluster and start
 them again once the new certificates and keys are replicated. The
 options ``--new-cluster-certificate`` and ``--new-confd-hmac-key``
-can be used to regenerate the cluster-internal SSL certificate
-respective the HMAC key used by **ganeti-confd**\(8).
+can be used to regenerate respectively the cluster-internal SSL
+certificate and the HMAC key used by **ganeti-confd**\(8).
 
 To generate a new self-signed RAPI certificate (used by
 **ganeti-rapi**\(8)) specify ``--new-rapi-certificate``. If you want to
 use your own certificate, e.g. one signed by a certificate
 authority (CA), pass its filename to ``--rapi-certificate``.
 
-To generate a new self-signed SPICE certificate, used by SPICE
+To generate a new self-signed SPICE certificate, used for SPICE
 connections to the KVM hypervisor, specify the
 ``--new-spice-certificate`` option. If you want to provide a
 certificate, pass its filename to ``--spice-certificate`` and pass the
 signing CA certificate to ``--spice-ca-certificate``.
 
-``--new-cluster-domain-secret`` generates a new, random cluster
-domain secret. ``--cluster-domain-secret`` reads the secret from a
-file. The cluster domain secret is used to sign information
-exchanged between separate clusters via a third party.
+Finally ``--new-cluster-domain-secret`` generates a new, random
+cluster domain secret, and ``--cluster-domain-secret`` reads the
+secret from a file. The cluster domain secret is used to sign
+information exchanged between separate clusters via a third party.
 
 REPAIR-DISK-SIZES
 ~~~~~~~~~~~~~~~~~
index bbb775d..12b4c84 100644 (file)
@@ -27,12 +27,10 @@ ADD
 | [\--node-parameters=*NDPARAMS*]
 | [\--alloc-policy=*POLICY*]
 | [{-D|\--disk-parameters} *disk-template*:*disk-param*=*value*[,*disk-param*=*value*...]]
-| [\--specs-cpu-count *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-disk-count *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [\--ipolicy-bounds-specs *bound_ispecs*]
 | [\--ipolicy-disk-templates *template* [,*template*...]]
+| [\--ipolicy-spindle-ratio *ratio*]
+| [\--ipolicy-vcpu-ratio *ratio*]
 | [\--disk-state *diskstate*]
 | [\--hypervisor-state *hvstate*]
 | {*group*}
@@ -67,9 +65,8 @@ parameters for the node group; please see the section about
 **gnt-cluster add** in **gnt-cluster**\(8) for more information about
 disk parameters
 
-The ``--specs-...`` and ``--ipolicy-disk-templates`` options specify
-instance policies on the node group, and are documented in the
-**gnt-cluster**\(8) man page.
+The ``--ipolicy-...`` options specify instance policies on the node
+group, and are documented in the **gnt-cluster**\(8) man page.
 
 See **ganeti**\(7) for a description of ``--submit`` and other common
 options.
@@ -102,12 +99,10 @@ MODIFY
 | [\--hypervisor-state *hvstate*]
 | [{-D|\--disk-parameters} *disk-template*:*disk-param*=*value*[,*disk-param*=*value*...]]
 | [\--disk-state *diskstate*]
-| [\--specs-cpu-count *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-disk-count *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
-| [\--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [\--ipolicy-bounds-specs *bound_ispecs*]
 | [\--ipolicy-disk-templates *template* [,*template*...]]
+| [\--ipolicy-spindle-ratio *ratio*]
+| [\--ipolicy-vcpu-ratio *ratio*]
 | {*group*}
 
 Modifies some parameters from the node group.
@@ -120,9 +115,8 @@ The ``--node-parameters``, ``--alloc-policy``, ``-D
 (--disk-parameters)`` options are documented in the **add** command
 above.
 
-The ``--specs-...`` and ``--ipolicy-disk-templates`` options specify
-instance policies on the node group, and are documented in the
-**gnt-cluster**\(8) man page.
+The ``--ipolicy-...`` options specify instance policies on the node
+group, and are documented in the **gnt-cluster**\(8) man page.
 
 See **ganeti**\(7) for a description of ``--submit`` and other common
 options.
@@ -251,10 +245,23 @@ be interpreted as stdin.
 INFO
 ~~~~
 
-**info** [group...]
+**info** [*group*...]
 
 Shows config information for all (or given) groups.
 
+SHOW-ISPECS-CMD
+~~~~~~~~~~~~~~~
+
+**show-ispecs-cmd** [\--include-defaults] [*group*...]
+
+Shows the command line that can be used to recreate the given groups (or
+all groups, if none is given) with the same instance policy spec options.
+
+If ``--include-defaults`` is specified, the default values (i.e. the
+cluster-level settings) are also included, and not only the
+configuration items that a group overrides.
+
 
 .. vim: set textwidth=72 :
 .. Local Variables:
index 85e8f52..22b67bb 100644 (file)
@@ -153,24 +153,28 @@ support all options. Some common options are:
   other backends must be selected. The option is described in the man
   page **htools**\(1).
 
-  The file should contain text data, line-based, with two empty lines
+  The file should contain text data, line-based, with single empty lines
   separating sections. The lines themselves are column-based, with the
   pipe symbol (``|``) acting as separator.
 
-  The first section contains group data, with two columns:
+  The first section contains group data, with the following columns:
 
   - group name
   - group uuid
+  - allocation policy
+  - tags (separated by comma)
 
   The second sections contains node data, with the following columns:
 
   - node name
   - node total memory
+  - memory used by the node
   - node free memory
   - node total disk
   - node free disk
   - node physical cores
-  - offline field (as ``Y`` or ``N``)
+  - offline/role field (``Y`` for offline nodes, ``N`` for online non-master
+    nodes, and ``M`` for the master node which is always online)
   - group UUID
   - node spindle count
 
@@ -194,8 +198,10 @@ support all options. Some common options are:
   groups, in the following format (separated by ``|``):
 
   - owner (empty if cluster, group name otherwise)
-  - standard, min, max instance specs, containing the following values
-    separated by commas:
+  - standard, min, max instance specs; min and max instance specs are
+    separated from each other by a semicolon, and can be specified multiple
+    times (min;max;min;max...); each of the specs contains the following
+    values separated by commas:
     - memory size
     - cpu count
     - disk size
index 1d35baf..eeee935 100755 (executable)
 # pylint: disable=C0103
 # due to invalid name
 
-import sys
+import copy
 import datetime
 import optparse
+import sys
 
 import qa_cluster
 import qa_config
@@ -523,33 +524,52 @@ def RunExclusiveStorageTests():
 
 
 def _BuildSpecDict(par, mn, st, mx):
-  return {par: {"min": mn, "std": st, "max": mx}}
+  return {
+    constants.ISPECS_MINMAX: [{
+      constants.ISPECS_MIN: {par: mn},
+      constants.ISPECS_MAX: {par: mx},
+      }],
+    constants.ISPECS_STD: {par: st},
+    }
+
+
+def _BuildDoubleSpecDict(index, par, mn, st, mx):
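+  # Build a partial policy with two min/max pairs; bounds for parameter
+  # 'par' are set only in the pair selected by 'index', plus an optional
+  # std value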
+  new_spec = {
+    constants.ISPECS_MINMAX: [{}, {}],
+    }
+  if st is not None:
+    new_spec[constants.ISPECS_STD] = {par: st}
+  new_spec[constants.ISPECS_MINMAX][index] = {
+    constants.ISPECS_MIN: {par: mn},
+    constants.ISPECS_MAX: {par: mx},
+    }
+  return new_spec
 
 
 def TestIPolicyPlainInstance():
   """Test instance policy interaction with instances"""
-  params = ["mem-size", "cpu-count", "disk-count", "disk-size", "nic-count"]
+  params = ["memory-size", "cpu-count", "disk-count", "disk-size", "nic-count"]
   if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
     print "Template %s not supported" % constants.DT_PLAIN
     return
 
   # This test assumes that the group policy is empty
-  (_, old_specs) = qa_cluster.TestClusterSetISpecs({})
+  (_, old_specs) = qa_cluster.TestClusterSetISpecs()
+  # We also assume to have only one min/max bound
+  assert len(old_specs[constants.ISPECS_MINMAX]) == 1
   node = qa_config.AcquireNode()
   try:
-    # Log of policy changes, list of tuples: (change, policy_violated)
+    # Log of policy changes, list of tuples:
+    # (full_change, incremental_change, policy_violated)
     history = []
     instance = qa_instance.TestInstanceAddWithPlainDisk([node])
     try:
       policyerror = [constants.CV_EINSTANCEPOLICY]
       for par in params:
-        qa_cluster.AssertClusterVerify()
         (iminval, imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
         # Some specs must be multiple of 4
         new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4, imaxval + 4)
-        history.append((new_spec, True))
-        qa_cluster.TestClusterSetISpecs(new_spec)
-        qa_cluster.AssertClusterVerify(warnings=policyerror)
+        history.append((None, new_spec, True))
         if iminval > 0:
           # Some specs must be multiple of 4
           if iminval >= 4:
@@ -557,19 +577,57 @@ def TestIPolicyPlainInstance():
           else:
             upper = iminval - 1
           new_spec = _BuildSpecDict(par, 0, upper, upper)
-          history.append((new_spec, True))
-          qa_cluster.TestClusterSetISpecs(new_spec)
+          history.append((None, new_spec, True))
+        history.append((old_specs, None, False))
+
+      # Test with two instance specs
+      double_specs = copy.deepcopy(old_specs)
+      double_specs[constants.ISPECS_MINMAX] = \
+          double_specs[constants.ISPECS_MINMAX] * 2
+      (par1, par2) = params[0:2]
+      (_, imaxval1) = qa_instance.GetInstanceSpec(instance.name, par1)
+      (_, imaxval2) = qa_instance.GetInstanceSpec(instance.name, par2)
+      old_minmax = old_specs[constants.ISPECS_MINMAX][0]
+      history.extend([
+        (double_specs, None, False),
+        # The first min/max limit is being violated
+        (None,
+         _BuildDoubleSpecDict(0, par1, imaxval1 + 4, imaxval1 + 4,
+                              imaxval1 + 4),
+         False),
+        # Both min/max limits are being violated
+        (None,
+         _BuildDoubleSpecDict(1, par2, imaxval2 + 4, None, imaxval2 + 4),
+         True),
+        # The second min/max limit is being violated
+        (None,
+         _BuildDoubleSpecDict(0, par1,
+                              old_minmax[constants.ISPECS_MIN][par1],
+                              old_specs[constants.ISPECS_STD][par1],
+                              old_minmax[constants.ISPECS_MAX][par1]),
+         False),
+        (old_specs, None, False),
+        ])
+
+      # Apply the changes, and check policy violations after each change
+      qa_cluster.AssertClusterVerify()
+      for (new_specs, diff_specs, failed) in history:
+        qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
+                                        diff_specs=diff_specs)
+        if failed:
           qa_cluster.AssertClusterVerify(warnings=policyerror)
-        qa_cluster.TestClusterSetISpecs(old_specs)
-        history.append((old_specs, False))
+        else:
+          qa_cluster.AssertClusterVerify()
+
       qa_instance.TestInstanceRemove(instance)
     finally:
       instance.Release()
 
     # Now we replay the same policy changes, and we expect that the instance
     # cannot be created for the cases where we had a policy violation above
-    for (change, failed) in history:
-      qa_cluster.TestClusterSetISpecs(change)
+    for (new_specs, diff_specs, failed) in history:
+      qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
+                                      diff_specs=diff_specs)
       if failed:
         qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
       # Instance creation with no policy violation has been tested already
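(As a sketch of the spec dictionaries built by the helpers above, assuming
the usual string values of the ISPECS_* constants ("minmax", "min", "max",
"std"), a call such as

  _BuildSpecDict("disk-size", 0, 512, 512)

returns

  {
    "minmax": [{"min": {"disk-size": 0}, "max": {"disk-size": 512}}],
    "std": {"disk-size": 512},
  }

while _BuildDoubleSpecDict(1, "disk-size", 0, None, 512) fills only the
second of two min/max slots and, since the std value is None, omits the
"std" entry.)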
index 061b13c..02c076c 100644 (file)
@@ -634,30 +634,17 @@ def _GetClusterIPolicy():
   @rtype: tuple
   @return: (policy, specs), where:
       - policy is a dictionary of the policy values, instance specs excluded
-      - specs is dict of dict, specs[par][key] is a spec value, where key is
-        "min", "max", or "std"
+      - specs is a dictionary containing only the specs, using the internal
+        format (see L{constants.IPOLICY_DEFAULTS} for an example)
 
   """
   info = qa_utils.GetObjectInfo(["gnt-cluster", "info"])
   policy = info["Instance policy - limits for instances"]
-  ret_specs = {}
-  ret_policy = {}
-  ispec_keys = constants.ISPECS_MINMAX_KEYS | frozenset([constants.ISPECS_STD])
-  for (key, val) in policy.items():
-    if key in ispec_keys:
-      for (par, pval) in val.items():
-        if par == "memory-size":
-          par = "mem-size"
-        d = ret_specs.setdefault(par, {})
-        d[key] = pval
-    else:
-      ret_policy[key] = val
+  (ret_policy, ret_specs) = qa_utils.ParseIPolicy(policy)
 
   # Sanity checks
-  assert len(ret_specs) > 0
-  good = all("min" in d and "std" in d and "max" in d
-             for d in ret_specs.values())
-  assert good, "Missing item in specs: %s" % ret_specs
+  assert "minmax" in ret_specs and "std" in ret_specs
+  assert len(ret_specs["minmax"]) > 0
   assert len(ret_policy) > 0
   return (ret_policy, ret_specs)
 
@@ -719,54 +706,40 @@ def TestClusterModifyIPolicy():
       AssertEqual(eff_policy[p], old_policy[p])
 
 
-def TestClusterSetISpecs(new_specs, fail=False, old_values=None):
+def TestClusterSetISpecs(new_specs=None, diff_specs=None, fail=False,
+                         old_values=None):
   """Change instance specs.
 
-  @type new_specs: dict of dict
-  @param new_specs: new_specs[par][key], where key is "min", "max", "std". It
-      can be an empty dictionary.
+  At most one of new_specs or diff_specs can be specified.
+
+  @type new_specs: dict
+  @param new_specs: new complete specs, in the same format returned by
+      L{_GetClusterIPolicy}
+  @type diff_specs: dict
+  @param diff_specs: partial specs; they can be an incomplete specification,
+      but if min/max specs are specified, their number must match the number
+      of the existing specs
   @type fail: bool
   @param fail: if the change is expected to fail
   @type old_values: tuple
   @param old_values: (old_policy, old_specs), as returned by
-     L{_GetClusterIPolicy}
+      L{_GetClusterIPolicy}
   @return: same as L{_GetClusterIPolicy}
 
   """
-  if old_values:
-    (old_policy, old_specs) = old_values
-  else:
-    (old_policy, old_specs) = _GetClusterIPolicy()
-  if new_specs:
-    cmd = ["gnt-cluster", "modify"]
-    for (par, keyvals) in new_specs.items():
-      if par == "spindle-use":
-        # ignore spindle-use, which is not settable
-        continue
-      cmd += [
-        "--specs-%s" % par,
-        ",".join(["%s=%s" % (k, v) for (k, v) in keyvals.items()]),
-        ]
-    AssertCommand(cmd, fail=fail)
-  # Check the new state
-  (eff_policy, eff_specs) = _GetClusterIPolicy()
-  AssertEqual(eff_policy, old_policy)
-  if fail:
-    AssertEqual(eff_specs, old_specs)
-  else:
-    for par in eff_specs:
-      for key in eff_specs[par]:
-        if par in new_specs and key in new_specs[par]:
-          AssertEqual(int(eff_specs[par][key]), int(new_specs[par][key]))
-        else:
-          AssertEqual(int(eff_specs[par][key]), int(old_specs[par][key]))
-  return (eff_policy, eff_specs)
+  build_cmd = lambda opts: ["gnt-cluster", "modify"] + opts
+  return qa_utils.TestSetISpecs(
+    new_specs=new_specs, diff_specs=diff_specs,
+    get_policy_fn=_GetClusterIPolicy, build_cmd_fn=build_cmd,
+    fail=fail, old_values=old_values)
 
 
 def TestClusterModifyISpecs():
   """gnt-cluster modify --specs-*"""
-  params = ["mem-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
+  params = ["memory-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
   (cur_policy, cur_specs) = _GetClusterIPolicy()
+  # This test assumes that there is only one min/max bound
+  assert len(cur_specs[constants.ISPECS_MINMAX]) == 1
   for par in params:
     test_values = [
       (True, 0, 4, 12),
@@ -783,14 +756,37 @@ def TestClusterModifyISpecs():
       (False, 0, 4, "a"),
       # This is to restore the old values
       (True,
-       cur_specs[par]["min"], cur_specs[par]["std"], cur_specs[par]["max"])
+       cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MIN][par],
+       cur_specs[constants.ISPECS_STD][par],
+       cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MAX][par])
       ]
     for (good, mn, st, mx) in test_values:
-      new_vals = {par: {"min": str(mn), "std": str(st), "max": str(mx)}}
+      new_vals = {
+        constants.ISPECS_MINMAX: [{
+          constants.ISPECS_MIN: {par: mn},
+          constants.ISPECS_MAX: {par: mx}
+          }],
+        constants.ISPECS_STD: {par: st}
+        }
       cur_state = (cur_policy, cur_specs)
       # We update cur_specs, as we've copied the values to restore already
-      (cur_policy, cur_specs) = TestClusterSetISpecs(new_vals, fail=not good,
-                                                     old_values=cur_state)
+      (cur_policy, cur_specs) = TestClusterSetISpecs(
+        diff_specs=new_vals, fail=not good, old_values=cur_state)
+
+    # Get the ipolicy command
+    mnode = qa_config.GetMasterNode()
+    initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
+    modcmd = ["gnt-cluster", "modify"]
+    opts = initcmd.split()
+    assert opts[0:2] == ["gnt-cluster", "init"]
+    for k in range(2, len(opts) - 1):
+      if opts[k].startswith("--ipolicy-"):
+        assert k + 2 <= len(opts)
+        modcmd.extend(opts[k:k + 2])
+    # Re-apply the ipolicy (this should be a no-op)
+    AssertCommand(modcmd)
+    new_initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
+    AssertEqual(initcmd, new_initcmd)
 
 
 def TestClusterInfo():
index e48d37f..e71451a 100644 (file)
@@ -79,31 +79,162 @@ def TestGroupAddWithOptions():
   AssertCommand(["gnt-group", "remove", group1])
 
 
+def _GetGroupIPolicy(groupname):
+  """Return the run-time values of the cluster-level instance policy.
+
+  @type groupname: string
+  @param groupname: node group name
+  @rtype: tuple
+  @return: (policy, specs), where:
+      - policy is a dictionary of the policy values, instance specs excluded
+      - specs is a dictionary containing only the specs, using the internal
+        format (see L{constants.IPOLICY_DEFAULTS} for an example), but without
+        the standard values
+
+  """
+  info = qa_utils.GetObjectInfo(["gnt-group", "info", groupname])
+  assert len(info) == 1
+  policy = info[0]["Instance policy"]
+
+  (ret_policy, ret_specs) = qa_utils.ParseIPolicy(policy)
+
+  # Sanity checks
+  assert "minmax" in ret_specs
+  assert len(ret_specs["minmax"]) > 0
+  assert len(ret_policy) > 0
+  return (ret_policy, ret_specs)
+
+
+def _TestGroupSetISpecs(groupname, new_specs=None, diff_specs=None,
+                        fail=False, old_values=None):
+  """Change instance specs on a group.
+
+  At most one of new_specs or diff_specs can be specified.
+
+  @type groupname: string
+  @param groupname: group name
+  @type new_specs: dict
+  @param new_specs: new complete specs, in the same format returned by
+      L{_GetGroupIPolicy}
+  @type diff_specs: dict
+  @param diff_specs: partial specs; they can be an incomplete specification,
+      but if min/max specs are specified, their number must match the number
+      of the existing specs
+  @type fail: bool
+  @param fail: if the change is expected to fail
+  @type old_values: tuple
+  @param old_values: (old_policy, old_specs), as returned by
+      L{_GetGroupIPolicy}
+  @return: same as L{_GetGroupIPolicy}
+
+  """
+  build_cmd = lambda opts: ["gnt-group", "modify"] + opts + [groupname]
+  get_policy = lambda: _GetGroupIPolicy(groupname)
+  return qa_utils.TestSetISpecs(
+    new_specs=new_specs, diff_specs=diff_specs,
+    get_policy_fn=get_policy, build_cmd_fn=build_cmd,
+    fail=fail, old_values=old_values)
+
+
+def _TestGroupModifyISpecs(groupname):
+  # This test is built on the assumption that the default ipolicy holds for
+  # the node group under test
+  old_values = _GetGroupIPolicy(groupname)
+  samevals = dict((p, 4) for p in constants.ISPECS_PARAMETERS)
+  base_specs = {
+    constants.ISPECS_MINMAX: [{
+      constants.ISPECS_MIN: samevals,
+      constants.ISPECS_MAX: samevals,
+      }],
+    }
+  mod_values = _TestGroupSetISpecs(groupname, new_specs=base_specs,
+                                   old_values=old_values)
+  for par in constants.ISPECS_PARAMETERS:
+    # First make sure that the test works with good values
+    good_specs = {
+      constants.ISPECS_MINMAX: [{
+        constants.ISPECS_MIN: {par: 8},
+        constants.ISPECS_MAX: {par: 8},
+        }],
+      }
+    mod_values = _TestGroupSetISpecs(groupname, diff_specs=good_specs,
+                                     old_values=mod_values)
+    bad_specs = {
+      constants.ISPECS_MINMAX: [{
+        constants.ISPECS_MIN: {par: 8},
+        constants.ISPECS_MAX: {par: 4},
+        }],
+      }
+    _TestGroupSetISpecs(groupname, diff_specs=bad_specs, fail=True,
+                        old_values=mod_values)
+  AssertCommand(["gnt-group", "modify", "--ipolicy-bounds-specs", "default",
+                 groupname])
+  AssertEqual(_GetGroupIPolicy(groupname), old_values)
+
+  # Get the ipolicy command (from the cluster config)
+  mnode = qa_config.GetMasterNode()
+  addcmd = GetCommandOutput(mnode.primary, utils.ShellQuoteArgs([
+    "gnt-group", "show-ispecs-cmd", "--include-defaults", groupname,
+    ]))
+  modcmd = ["gnt-group", "modify"]
+  opts = addcmd.split()
+  assert opts[0:2] == ["gnt-group", "add"]
+  for k in range(2, len(opts) - 1):
+    if opts[k].startswith("--ipolicy-"):
+      assert k + 2 <= len(opts)
+      modcmd.extend(opts[k:k + 2])
+  modcmd.append(groupname)
+  # Apply the ipolicy to the group and verify the result
+  AssertCommand(modcmd)
+  new_addcmd = GetCommandOutput(mnode.primary, utils.ShellQuoteArgs([
+    "gnt-group", "show-ispecs-cmd", groupname,
+    ]))
+  AssertEqual(addcmd, new_addcmd)
+
+
+def _TestGroupModifyIPolicy(groupname):
+  _TestGroupModifyISpecs(groupname)
+
+  # We assume that the default ipolicy holds
+  (old_policy, old_specs) = _GetGroupIPolicy(groupname)
+  for (par, setval, iname, expval) in [
+    ("vcpu-ratio", 1.5, None, 1.5),
+    ("spindle-ratio", 1.5, None, 1.5),
+    ("disk-templates", constants.DT_PLAIN,
+     "enabled disk templates", constants.DT_PLAIN)
+    ]:
+    if not iname:
+      iname = par
+    build_cmdline = lambda val: ["gnt-group", "modify", "--ipolicy-" + par,
+                                 str(val), groupname]
+
+    AssertCommand(build_cmdline(setval))
+    (new_policy, new_specs) = _GetGroupIPolicy(groupname)
+    AssertEqual(new_specs, old_specs)
+    for (p, val) in new_policy.items():
+      if p == iname:
+        AssertEqual(val, expval)
+      else:
+        AssertEqual(val, old_policy[p])
+
+    AssertCommand(build_cmdline("default"))
+    (new_policy, new_specs) = _GetGroupIPolicy(groupname)
+    AssertEqual(new_specs, old_specs)
+    AssertEqual(new_policy, old_policy)
+
+
 def TestGroupModify():
   """gnt-group modify"""
   (group1, ) = qa_utils.GetNonexistentGroups(1)
 
   AssertCommand(["gnt-group", "add", group1])
 
-  std_defaults = constants.IPOLICY_DEFAULTS[constants.ISPECS_STD]
-  min_v = std_defaults[constants.ISPEC_MEM_SIZE] * 10
-  max_v = min_v * 10
-
   try:
+    _TestGroupModifyIPolicy(group1)
     AssertCommand(["gnt-group", "modify", "--alloc-policy", "unallocable",
                    "--node-parameters", "oob_program=/bin/false", group1])
     AssertCommand(["gnt-group", "modify",
                    "--alloc-policy", "notvalid", group1], fail=True)
-    AssertCommand(["gnt-group", "modify", "--specs-mem-size",
-                   "min=%s,max=%s,std=0" % (min_v, max_v), group1], fail=True)
-    AssertCommand(["gnt-group", "modify", "--specs-mem-size",
-                   "min=%s,max=%s" % (min_v, max_v), group1])
-    AssertCommand(["gnt-group", "modify", "--specs-mem-size",
-                   "min=default,max=default", group1])
-    AssertCommand(["gnt-group", "modify", "--ipolicy-vcpu-ratio",
-                   "3.5", group1])
-    AssertCommand(["gnt-group", "modify", "--ipolicy-vcpu-ratio",
-                   "default", group1])
     AssertCommand(["gnt-group", "modify",
                    "--node-parameters", "spindle_count=10", group1])
     if qa_config.TestEnabled("htools"):
index da623c7..87c9a8f 100644 (file)
@@ -338,7 +338,7 @@ def GetInstanceSpec(instance, spec):
   @type instance: string
   @param instance: Instance name
   @type spec: string
-  @param spec: one of the supported parameters: "mem-size", "cpu-count",
+  @param spec: one of the supported parameters: "memory-size", "cpu-count",
       "disk-count", "disk-size", "nic-count"
   @rtype: tuple
   @return: (minspec, maxspec); minspec and maxspec can be different only for
@@ -346,7 +346,7 @@ def GetInstanceSpec(instance, spec):
 
   """
   specmap = {
-    "mem-size": ["be/minmem", "be/maxmem"],
+    "memory-size": ["be/minmem", "be/maxmem"],
     "cpu-count": ["vcpus"],
     "disk-count": ["disk.count"],
     "disk-size": ["disk.size/ "],
index e8b49bd..7ec0d49 100644 (file)
@@ -23,6 +23,7 @@
 
 """
 
+import copy
 import operator
 import os
 import random
@@ -777,3 +778,127 @@ def MakeNodePath(node, path):
     return "%s%s" % (vcluster.MakeNodeRoot(basedir, name), path)
   else:
     return path
+
+
+def _GetParameterOptions(specs):
+  """Helper to build policy options."""
+  values = ["%s=%s" % (par, val)
+            for (par, val) in specs.items()]
+  return ",".join(values)
+
+
+def TestSetISpecs(new_specs=None, diff_specs=None, get_policy_fn=None,
+                  build_cmd_fn=None, fail=False, old_values=None):
+  """Change instance specs for an object.
+
+  At most one of new_specs or diff_specs can be specified.
+
+  @type new_specs: dict
+  @param new_specs: new complete specs, in the same format returned by
+      L{ParseIPolicy}.
+  @type diff_specs: dict
+  @param diff_specs: partial specs; they can be an incomplete specification,
+      but if min/max specs are specified, their number must match the number
+      of the existing specs
+  @type get_policy_fn: function
+  @param get_policy_fn: function that returns the current policy as in
+      L{ParseIPolicy}
+  @type build_cmd_fn: function
+  @param build_cmd_fn: function that returns the full command line from the
+      options alone
+  @type fail: bool
+  @param fail: if the change is expected to fail
+  @type old_values: tuple
+  @param old_values: (old_policy, old_specs), as returned by
+      L{ParseIPolicy}
+  @return: same as L{ParseIPolicy}
+
+  """
+  assert get_policy_fn is not None
+  assert build_cmd_fn is not None
+  assert new_specs is None or diff_specs is None
+
+  if old_values:
+    (old_policy, old_specs) = old_values
+  else:
+    (old_policy, old_specs) = get_policy_fn()
+
+  if diff_specs:
+    new_specs = copy.deepcopy(old_specs)
+    if constants.ISPECS_MINMAX in diff_specs:
+      AssertEqual(len(new_specs[constants.ISPECS_MINMAX]),
+                  len(diff_specs[constants.ISPECS_MINMAX]))
+      for (new_minmax, diff_minmax) in zip(new_specs[constants.ISPECS_MINMAX],
+                                           diff_specs[constants.ISPECS_MINMAX]):
+        for (key, parvals) in diff_minmax.items():
+          for (par, val) in parvals.items():
+            new_minmax[key][par] = val
+    for (par, val) in diff_specs.get(constants.ISPECS_STD, {}).items():
+      new_specs[constants.ISPECS_STD][par] = val
+
+  if new_specs:
+    cmd = []
+    if (diff_specs is None or constants.ISPECS_MINMAX in diff_specs):
+      minmax_opt_items = []
+      for minmax in new_specs[constants.ISPECS_MINMAX]:
+        minmax_opts = []
+        for key in ["min", "max"]:
+          keyopt = _GetParameterOptions(minmax[key])
+          minmax_opts.append("%s:%s" % (key, keyopt))
+        minmax_opt_items.append("/".join(minmax_opts))
+      cmd.extend([
+        "--ipolicy-bounds-specs",
+        "//".join(minmax_opt_items)
+        ])
+    if diff_specs is None:
+      std_source = new_specs
+    else:
+      std_source = diff_specs
+    std_opt = _GetParameterOptions(std_source.get("std", {}))
+    if std_opt:
+      cmd.extend(["--ipolicy-std-specs", std_opt])
+    AssertCommand(build_cmd_fn(cmd), fail=fail)
+
+    # Check the new state
+    (eff_policy, eff_specs) = get_policy_fn()
+    AssertEqual(eff_policy, old_policy)
+    if fail:
+      AssertEqual(eff_specs, old_specs)
+    else:
+      AssertEqual(eff_specs, new_specs)
+
+  else:
+    (eff_policy, eff_specs) = (old_policy, old_specs)
+
+  return (eff_policy, eff_specs)
+
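(To illustrate the options built above, with made-up values: for a single
min/max pair the "--ipolicy-bounds-specs" value has the form

  min:memory-size=128,cpu-count=1/max:memory-size=32768,cpu-count=8

i.e. the "min:" and "max:" parts of a pair are joined by "/"; a further pair,
if present, is appended after a "//" separator. Standard values are passed
through a separate option, e.g. "--ipolicy-std-specs memory-size=128".)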
+
+def ParseIPolicy(policy):
+  """Parse and split instance an instance policy.
+
+  @type policy: dict
+  @param policy: policy, as returned by L{GetObjectInfo}
+  @rtype: tuple
+  @return: (policy, specs), where:
+      - policy is a dictionary of the policy values, instance specs excluded
+      - specs is a dictionary containing only the specs, using the internal
+        format (see L{constants.IPOLICY_DEFAULTS} for an example)
+
+  """
+  ret_specs = {}
+  ret_policy = {}
+  for (key, val) in policy.items():
+    if key == "bounds specs":
+      ret_specs[constants.ISPECS_MINMAX] = []
+      for minmax in val:
+        ret_minmax = {}
+        for bound_key in minmax:
+          keyparts = bound_key.split("/", 1)
+          assert len(keyparts) > 1
+          ret_minmax[keyparts[0]] = minmax[bound_key]
+        ret_specs[constants.ISPECS_MINMAX].append(ret_minmax)
+    elif key == constants.ISPECS_STD:
+      ret_specs[key] = val
+    else:
+      ret_policy[key] = val
+  return (ret_policy, ret_specs)
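(ParseIPolicy keeps only the part of each bounds-specs key before the first
"/" (e.g. a hypothetical "min/0" becomes "min"), so the returned specs take
the shape

  {"minmax": [{"min": {...}, "max": {...}}], "std": {...}}

which is the internal format expected by TestSetISpecs and checked by the QA
sanity asserts above.)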
index 579ce77..2370eb7 100644 (file)
@@ -32,12 +32,14 @@ module Ganeti.HTools.Backend.Text
   , loadInst
   , loadNode
   , loadISpec
+  , loadMultipleMinMaxISpecs
   , loadIPolicy
   , serializeInstances
   , serializeNode
   , serializeNodes
   , serializeGroup
   , serializeISpec
+  , serializeMultipleMinMaxISpecs
   , serializeIPolicy
   , serializeCluster
   ) where
@@ -117,6 +119,10 @@ serializeInstances :: Node.List -> Instance.List -> String
 serializeInstances nl =
   unlines . map (serializeInstance nl) . Container.elems
 
+-- | Separator between ISpecs (in MinMaxISpecs).
+iSpecsSeparator :: Char
+iSpecsSeparator = ';'
+
 -- | Generate a spec data from a given ISpec object.
 serializeISpec :: ISpec -> String
 serializeISpec ispec =
@@ -130,15 +136,20 @@ serializeISpec ispec =
 serializeDiskTemplates :: [DiskTemplate] -> String
 serializeDiskTemplates = intercalate "," . map diskTemplateToRaw
 
+-- | Generate min/max instance specs data.
+serializeMultipleMinMaxISpecs :: [MinMaxISpecs] -> String
+serializeMultipleMinMaxISpecs minmaxes =
+  intercalate [iSpecsSeparator] $ foldr serialpair [] minmaxes
+  where serialpair (MinMaxISpecs minspec maxspec) acc =
+          serializeISpec minspec : serializeISpec maxspec : acc
+
 -- | Generate policy data from a given policy object.
 serializeIPolicy :: String -> IPolicy -> String
 serializeIPolicy owner ipol =
   let IPolicy minmax stdspec dts vcpu_ratio spindle_ratio = ipol
-      MinMaxISpecs minspec maxspec = minmax
       strings = [ owner
                 , serializeISpec stdspec
-                , serializeISpec minspec
-                , serializeISpec maxspec
+                , serializeMultipleMinMaxISpecs minmax
                 , serializeDiskTemplates dts
                 , show vcpu_ratio
                 , show spindle_ratio
@@ -255,18 +266,41 @@ loadISpec owner [mem_s, cpu_c, dsk_s, dsk_c, nic_c, su] = do
   return $ ISpec xmem_s xcpu_c xdsk_s xdsk_c xnic_c xsu
 loadISpec owner s = fail $ "Invalid ispec data for " ++ owner ++ ": " ++ show s
 
+-- | Load a single min/max ISpec pair
+loadMinMaxISpecs :: String -> String -> String -> Result MinMaxISpecs
+loadMinMaxISpecs owner minspec maxspec = do
+  xminspec <- loadISpec (owner ++ "/minspec") (commaSplit minspec)
+  xmaxspec <- loadISpec (owner ++ "/maxspec") (commaSplit maxspec)
+  return $ MinMaxISpecs xminspec xmaxspec
+
+-- | Break a list of ispecs strings into a list of (min/max) ispecs pairs
+breakISpecsPairs :: String -> [String] -> Result [(String, String)]
+breakISpecsPairs _ [] =
+  return []
+breakISpecsPairs owner (x:y:xs) = do
+  rest <- breakISpecsPairs owner xs
+  return $ (x, y) : rest
+breakISpecsPairs owner _ =
+  fail $ "Odd number of min/max specs for " ++ owner
+
+-- | Load a list of min/max ispecs pairs
+loadMultipleMinMaxISpecs :: String -> [String] -> Result [MinMaxISpecs]
+loadMultipleMinMaxISpecs owner ispecs = do
+  pairs <- breakISpecsPairs owner ispecs
+  mapM (uncurry $ loadMinMaxISpecs owner) pairs
+
 -- | Loads an ipolicy from a field list.
 loadIPolicy :: [String] -> Result (String, IPolicy)
-loadIPolicy [owner, stdspec, minspec, maxspec, dtemplates,
+loadIPolicy [owner, stdspec, minmaxspecs, dtemplates,
              vcpu_ratio, spindle_ratio] = do
   xstdspec <- loadISpec (owner ++ "/stdspec") (commaSplit stdspec)
-  xminspec <- loadISpec (owner ++ "/minspec") (commaSplit minspec)
-  xmaxspec <- loadISpec (owner ++ "/maxspec") (commaSplit maxspec)
+  xminmaxspecs <- loadMultipleMinMaxISpecs owner $
+                  sepSplit iSpecsSeparator minmaxspecs
   xdts <- mapM diskTemplateFromRaw $ commaSplit dtemplates
   xvcpu_ratio <- tryRead (owner ++ "/vcpu_ratio") vcpu_ratio
   xspindle_ratio <- tryRead (owner ++ "/spindle_ratio") spindle_ratio
   return (owner,
-          IPolicy (MinMaxISpecs xminspec xmaxspec) xstdspec
+          IPolicy xminmaxspecs xstdspec
                 xdts xvcpu_ratio xspindle_ratio)
 loadIPolicy s = fail $ "Invalid ipolicy data: '" ++ show s ++ "'"
 
index ddd5c17..96ea845 100644 (file)
@@ -33,6 +33,7 @@ module Ganeti.HTools.Cluster
   , EvacSolution(..)
   , Table(..)
   , CStats(..)
+  , AllocNodes
   , AllocResult
   , AllocMethod
   , AllocSolutionList
index 8de1009..4ba3e91 100644 (file)
@@ -279,12 +279,26 @@ instAboveISpec inst ispec
   | vcpus inst < T.iSpecCpuCount ispec = Bad T.FailCPU
   | otherwise = Ok ()
 
+-- | Checks if an instance matches a min/max specs pair
+instMatchesMinMaxSpecs :: Instance -> T.MinMaxISpecs -> T.OpResult ()
+instMatchesMinMaxSpecs inst minmax = do
+  instAboveISpec inst (T.minMaxISpecsMinSpec minmax)
+  instBelowISpec inst (T.minMaxISpecsMaxSpec minmax)
+
+-- | Checks if an instance matches any specs of a policy
+instMatchesSpecs :: Instance -> [T.MinMaxISpecs] -> T.OpResult ()
+ -- Return Ok for no constraints, though this should never happen
+instMatchesSpecs _ [] = Ok ()
+instMatchesSpecs inst (minmax:minmaxes) =
+  foldr eithermatch (instMatchesMinMaxSpecs inst minmax) minmaxes
+  where eithermatch mm (Bad _) = instMatchesMinMaxSpecs inst mm
+        eithermatch _ y@(Ok ()) = y
+
 -- | Checks if an instance matches a policy.
 instMatchesPolicy :: Instance -> T.IPolicy -> T.OpResult ()
 instMatchesPolicy inst ipol = do
-  let minmax = T.iPolicyMinMaxISpecs ipol
-  instAboveISpec inst (T.minMaxISpecsMinSpec minmax)
-  instBelowISpec inst (T.minMaxISpecsMaxSpec minmax)
+  instMatchesSpecs inst $ T.iPolicyMinMaxISpecs ipol
   if diskTemplate inst `elem` T.iPolicyDiskTemplates ipol
     then Ok ()
     else Bad T.FailDisk
index f4b0836..78f0687 100644 (file)
@@ -398,6 +398,18 @@ instFromSpec spx =
   Instance.create "new" (rspecMem spx) (rspecDsk spx) [rspecDsk spx]
     (rspecCpu spx) Running [] True (-1) (-1)
 
+-- | Run a tiered allocation for one instance template, continuing from the
+-- result of the previous run and stopping once the allocation limit is hit.
+combineTiered :: Maybe Int -> Cluster.AllocNodes -> Cluster.AllocResult ->
+           Instance.Instance -> Result Cluster.AllocResult
+combineTiered limit allocnodes result inst = do
+  let (_, nl, il, ixes, cstats) = result
+      ixes_cnt = length ixes
+      (stop, newlimit) = case limit of
+        Nothing -> (False, Nothing)
+        Just n -> (n <= ixes_cnt, Just (n - ixes_cnt))
+  if stop
+    then return result
+    else Cluster.tieredAlloc nl il newlimit inst allocnodes ixes cstats
+
 -- | Main function.
 main :: Options -> [String] -> IO ()
 main opts args = do
@@ -446,14 +458,19 @@ main opts args = do
 
   -- Run the tiered allocation
 
-  let minmax = iPolicyMinMaxISpecs ipol
-  let tspec = fromMaybe (rspecFromISpec (minMaxISpecsMaxSpec minmax))
-              (optTieredSpec opts)
+  let minmaxes = iPolicyMinMaxISpecs ipol
+      tspecs = case optTieredSpec opts of
+                 Nothing -> map (rspecFromISpec . minMaxISpecsMaxSpec)
+                            minmaxes
+                 Just t -> [t]
+      tinsts = map (\ts -> instFromSpec ts disk_template su) tspecs
+  tspec <- case tspecs of
+    [] -> exitErr "Empty list of specs received from the cluster"
+    t:_ -> return t
 
   (treason, trl_nl, _, spec_map) <-
     runAllocation cdata stop_allocation
-       (Cluster.tieredAlloc nl il alloclimit
-        (instFromSpec tspec disk_template su) allocnodes [] [])
+       (foldM (combineTiered alloclimit allocnodes) ([], nl, il, [], []) tinsts)
        tspec disk_template SpecTiered opts
 
   printTiered machine_r spec_map nl trl_nl treason
index e579e36..e9e9bad 100644 (file)
@@ -169,12 +169,12 @@ $(THH.buildObject "ISpec" "iSpec"
 
 -- | The default minimum ispec.
 defMinISpec :: ISpec
-defMinISpec = ISpec { iSpecMemorySize = C.ipolicyDefaultsMinmaxMinMemorySize
-                    , iSpecCpuCount   = C.ipolicyDefaultsMinmaxMinCpuCount
-                    , iSpecDiskSize   = C.ipolicyDefaultsMinmaxMinDiskSize
-                    , iSpecDiskCount  = C.ipolicyDefaultsMinmaxMinDiskCount
-                    , iSpecNicCount   = C.ipolicyDefaultsMinmaxMinNicCount
-                    , iSpecSpindleUse = C.ipolicyDefaultsMinmaxMinSpindleUse
+defMinISpec = ISpec { iSpecMemorySize = C.ispecsMinmaxDefaultsMinMemorySize
+                    , iSpecCpuCount   = C.ispecsMinmaxDefaultsMinCpuCount
+                    , iSpecDiskSize   = C.ispecsMinmaxDefaultsMinDiskSize
+                    , iSpecDiskCount  = C.ispecsMinmaxDefaultsMinDiskCount
+                    , iSpecNicCount   = C.ispecsMinmaxDefaultsMinNicCount
+                    , iSpecSpindleUse = C.ispecsMinmaxDefaultsMinSpindleUse
                     }
 
 -- | The default standard ispec.
@@ -189,12 +189,12 @@ defStdISpec = ISpec { iSpecMemorySize = C.ipolicyDefaultsStdMemorySize
 
 -- | The default max ispec.
 defMaxISpec :: ISpec
-defMaxISpec = ISpec { iSpecMemorySize = C.ipolicyDefaultsMinmaxMaxMemorySize
-                    , iSpecCpuCount   = C.ipolicyDefaultsMinmaxMaxCpuCount
-                    , iSpecDiskSize   = C.ipolicyDefaultsMinmaxMaxDiskSize
-                    , iSpecDiskCount  = C.ipolicyDefaultsMinmaxMaxDiskCount
-                    , iSpecNicCount   = C.ipolicyDefaultsMinmaxMaxNicCount
-                    , iSpecSpindleUse = C.ipolicyDefaultsMinmaxMaxSpindleUse
+defMaxISpec = ISpec { iSpecMemorySize = C.ispecsMinmaxDefaultsMaxMemorySize
+                    , iSpecCpuCount   = C.ispecsMinmaxDefaultsMaxCpuCount
+                    , iSpecDiskSize   = C.ispecsMinmaxDefaultsMaxDiskSize
+                    , iSpecDiskCount  = C.ispecsMinmaxDefaultsMaxDiskCount
+                    , iSpecNicCount   = C.ispecsMinmaxDefaultsMaxNicCount
+                    , iSpecSpindleUse = C.ispecsMinmaxDefaultsMaxSpindleUse
                     }
 
 -- | Minimum and maximum instance specs type.
@@ -204,15 +204,15 @@ $(THH.buildObject "MinMaxISpecs" "minMaxISpecs"
   ])
 
 -- | Default minimum and maximum instance specs.
-defMinMaxISpecs :: MinMaxISpecs
-defMinMaxISpecs = MinMaxISpecs { minMaxISpecsMinSpec = defMinISpec
-                               , minMaxISpecsMaxSpec = defMaxISpec
-                               }
+defMinMaxISpecs :: [MinMaxISpecs]
+defMinMaxISpecs = [MinMaxISpecs { minMaxISpecsMinSpec = defMinISpec
+                                , minMaxISpecsMaxSpec = defMaxISpec
+                                }]
 
 -- | Instance policy type.
 $(THH.buildObject "IPolicy" "iPolicy"
   [ THH.renameField "MinMaxISpecs" $
-      THH.simpleField C.ispecsMinmax [t| MinMaxISpecs |]
+      THH.simpleField C.ispecsMinmax [t| [MinMaxISpecs] |]
   , THH.renameField "StdSpec" $ THH.simpleField C.ispecsStd [t| ISpec |]
   , THH.renameField "DiskTemplates" $
       THH.simpleField C.ipolicyDts [t| [DiskTemplate] |]
index d6cd124..1550309 100644 (file)
@@ -66,9 +66,7 @@ module Ganeti.Objects
   , PartialISpecParams(..)
   , fillISpecParams
   , allISpecParamFields
-  , FilledMinMaxISpecs(..)
-  , PartialMinMaxISpecs(..)
-  , fillMinMaxISpecs
+  , MinMaxISpecs(..)
   , FilledIPolicy(..)
   , PartialIPolicy(..)
   , fillIPolicy
@@ -505,16 +503,7 @@ $(buildParam "ISpec" "ispec"
   , simpleField C.ispecSpindleUse  [t| Int |]
   ])
 
--- | Partial min-max instance specs. These is not built via buildParam since
--- it has a special 2-level inheritance mode.
-$(buildObject "PartialMinMaxISpecs" "mmis"
-  [ renameField "MinSpecP" $ simpleField "min" [t| PartialISpecParams |]
-  , renameField "MaxSpecP" $ simpleField "max" [t| PartialISpecParams |]
-  ])
-
--- | Filled min-max instance specs. This is not built via buildParam since
--- it has a special 2-level inheritance mode.
-$(buildObject "FilledMinMaxISpecs" "mmis"
+$(buildObject "MinMaxISpecs" "mmis"
   [ renameField "MinSpec" $ simpleField "min" [t| FilledISpecParams |]
   , renameField "MaxSpec" $ simpleField "max" [t| FilledISpecParams |]
   ])
@@ -523,8 +512,9 @@ $(buildObject "FilledMinMaxISpecs" "mmis"
 -- has a special 2-level inheritance mode.
 $(buildObject "PartialIPolicy" "ipolicy"
   [ optionalField . renameField "MinMaxISpecsP"
-                    $ simpleField C.ispecsMinmax [t| PartialMinMaxISpecs |]
-  , renameField "StdSpecP" $ simpleField "std" [t| PartialISpecParams |]
+                    $ simpleField C.ispecsMinmax   [t| [MinMaxISpecs] |]
+  , optionalField . renameField "StdSpecP"
+                    $ simpleField "std"            [t| PartialISpecParams |]
   , optionalField . renameField "SpindleRatioP"
                     $ simpleField "spindle-ratio"  [t| Double |]
   , optionalField . renameField "VcpuRatioP"
@@ -537,24 +527,13 @@ $(buildObject "PartialIPolicy" "ipolicy"
 -- has a special 2-level inheritance mode.
 $(buildObject "FilledIPolicy" "ipolicy"
   [ renameField "MinMaxISpecs"
-    $ simpleField C.ispecsMinmax [t| FilledMinMaxISpecs |]
+    $ simpleField C.ispecsMinmax [t| [MinMaxISpecs] |]
   , renameField "StdSpec" $ simpleField "std" [t| FilledISpecParams |]
   , simpleField "spindle-ratio"  [t| Double |]
   , simpleField "vcpu-ratio"     [t| Double |]
   , simpleField "disk-templates" [t| [DiskTemplate] |]
   ])
 
--- | Custom filler for the min-max instance specs.
-fillMinMaxISpecs :: FilledMinMaxISpecs -> Maybe PartialMinMaxISpecs ->
-                    FilledMinMaxISpecs
-fillMinMaxISpecs fminmax Nothing = fminmax
-fillMinMaxISpecs (FilledMinMaxISpecs { mmisMinSpec = fmin
-                                     , mmisMaxSpec = fmax })
-                 (Just PartialMinMaxISpecs { mmisMinSpecP = pmin
-                                           , mmisMaxSpecP = pmax }) =
-  FilledMinMaxISpecs { mmisMinSpec = fillISpecParams fmin pmin
-                     , mmisMaxSpec = fillISpecParams fmax pmax }
-
 -- | Custom filler for the ipolicy types.
 fillIPolicy :: FilledIPolicy -> PartialIPolicy -> FilledIPolicy
 fillIPolicy (FilledIPolicy { ipolicyMinMaxISpecs  = fminmax
@@ -567,8 +546,10 @@ fillIPolicy (FilledIPolicy { ipolicyMinMaxISpecs  = fminmax
                             , ipolicySpindleRatioP  = pspindleRatio
                             , ipolicyVcpuRatioP     = pvcpuRatio
                             , ipolicyDiskTemplatesP = pdiskTemplates}) =
-  FilledIPolicy { ipolicyMinMaxISpecs  = fillMinMaxISpecs fminmax pminmax
-                , ipolicyStdSpec       = fillISpecParams fstd pstd
+  FilledIPolicy { ipolicyMinMaxISpecs  = fromMaybe fminmax pminmax
+                , ipolicyStdSpec       = case pstd of
+                                         Nothing -> fstd
+                                         Just p -> fillISpecParams fstd p
                 , ipolicySpindleRatio  = fromMaybe fspindleRatio pspindleRatio
                 , ipolicyVcpuRatio     = fromMaybe fvcpuRatio pvcpuRatio
                 , ipolicyDiskTemplates = fromMaybe fdiskTemplates
diff --git a/test/data/cluster_config_2.7.json b/test/data/cluster_config_2.7.json
new file mode 100644 (file)
index 0000000..3d53c1b
--- /dev/null
@@ -0,0 +1,529 @@
+{
+  "cluster": {
+    "beparams": {
+      "default": {
+        "always_failover": false,
+        "auto_balance": true,
+        "maxmem": 128,
+        "minmem": 128,
+        "spindle_use": 1,
+        "vcpus": 1
+      }
+    },
+    "blacklisted_os": [],
+    "candidate_pool_size": 10,
+    "cluster_name": "cluster.name.example.com",
+    "ctime": 1343869045.604884,
+    "default_iallocator": "hail",
+    "disk_state_static": {},
+    "diskparams": {
+      "blockdev": {},
+      "diskless": {},
+      "drbd": {
+        "c-delay-target": 1,
+        "c-fill-target": 200,
+        "c-max-rate": 2048,
+        "c-min-rate": 1024,
+        "c-plan-ahead": 1,
+        "data-stripes": 2,
+        "disk-barriers": "bf",
+        "disk-custom": "",
+        "dynamic-resync": false,
+        "meta-barriers": true,
+        "meta-stripes": 2,
+        "metavg": "xenvg",
+        "net-custom": "",
+        "resync-rate": 1024
+      },
+      "ext": {},
+      "file": {},
+      "plain": {
+        "stripes": 2
+      },
+      "rbd": {
+        "pool": "rbd"
+      },
+      "sharedfile": {}
+    },
+    "drbd_usermode_helper": "/bin/true",
+    "enabled_hypervisors": [
+      "xen-pvm"
+    ],
+    "file_storage_dir": "",
+    "hidden_os": [],
+    "highest_used_port": 32105,
+    "hv_state_static": {
+      "xen-pvm": {
+        "cpu_node": 1,
+        "cpu_total": 1,
+        "mem_hv": 0,
+        "mem_node": 0,
+        "mem_total": 0
+      }
+    },
+    "hvparams": {
+      "chroot": {
+        "init_script": "/ganeti-chroot"
+      },
+      "fake": {},
+      "kvm": {
+        "acpi": true,
+        "boot_order": "disk",
+        "cdrom2_image_path": "",
+        "cdrom_disk_type": "",
+        "cdrom_image_path": "",
+        "cpu_cores": 0,
+        "cpu_mask": "all",
+        "cpu_sockets": 0,
+        "cpu_threads": 0,
+        "cpu_type": "",
+        "disk_cache": "default",
+        "disk_type": "paravirtual",
+        "floppy_image_path": "",
+        "initrd_path": "",
+        "kernel_args": "ro",
+        "kernel_path": "/boot/vmlinuz-kvmU",
+        "keymap": "",
+        "kvm_extra": "",
+        "kvm_flag": "",
+        "kvm_path": "/usr/bin/kvm",
+        "machine_version": "",
+        "mem_path": "",
+        "migration_bandwidth": 4,
+        "migration_downtime": 30,
+        "migration_mode": "live",
+        "migration_port": 4041,
+        "nic_type": "paravirtual",
+        "reboot_behavior": "reboot",
+        "root_path": "/dev/vda1",
+        "security_domain": "",
+        "security_model": "none",
+        "serial_console": true,
+        "serial_speed": 38400,
+        "soundhw": "",
+        "spice_bind": "",
+        "spice_image_compression": "",
+        "spice_ip_version": 0,
+        "spice_jpeg_wan_compression": "",
+        "spice_password_file": "",
+        "spice_playback_compression": true,
+        "spice_streaming_video": "",
+        "spice_tls_ciphers": "HIGH:-DES:-3DES:-EXPORT:-ADH",
+        "spice_use_tls": false,
+        "spice_use_vdagent": true,
+        "spice_zlib_glz_wan_compression": "",
+        "usb_devices": "",
+        "usb_mouse": "",
+        "use_chroot": false,
+        "use_localtime": false,
+        "vga": "",
+        "vhost_net": false,
+        "vnc_bind_address": "",
+        "vnc_password_file": "",
+        "vnc_tls": false,
+        "vnc_x509_path": "",
+        "vnc_x509_verify": false
+      },
+      "lxc": {
+        "cpu_mask": ""
+      },
+      "xen-hvm": {
+        "acpi": true,
+        "blockdev_prefix": "hd",
+        "boot_order": "cd",
+        "cdrom_image_path": "",
+        "cpu_cap": 0,
+        "cpu_mask": "all",
+        "cpu_weight": 256,
+        "device_model": "/usr/lib/xen/bin/qemu-dm",
+        "disk_type": "paravirtual",
+        "kernel_path": "/usr/lib/xen/boot/hvmloader",
+        "migration_mode": "non-live",
+        "migration_port": 8082,
+        "nic_type": "rtl8139",
+        "pae": true,
+        "pci_pass": "",
+        "reboot_behavior": "reboot",
+        "use_localtime": false,
+        "vnc_bind_address": "0.0.0.0",
+        "vnc_password_file": "/your/vnc-cluster-password"
+      },
+      "xen-pvm": {
+        "blockdev_prefix": "sd",
+        "bootloader_args": "",
+        "bootloader_path": "",
+        "cpu_cap": 0,
+        "cpu_mask": "all",
+        "cpu_weight": 256,
+        "initrd_path": "",
+        "kernel_args": "ro",
+        "kernel_path": "/boot/vmlinuz-xenU",
+        "migration_mode": "live",
+        "migration_port": 8082,
+        "reboot_behavior": "reboot",
+        "root_path": "/dev/xvda1",
+        "use_bootloader": false
+      }
+    },
+    "ipolicy": {
+      "disk-templates": [
+        "sharedfile",
+        "diskless",
+        "plain",
+        "blockdev",
+        "drbd",
+        "file",
+        "rbd"
+      ],
+      "max": {
+        "cpu-count": 8,
+        "disk-count": 16,
+        "disk-size": 1048576,
+        "memory-size": 32768,
+        "nic-count": 8,
+        "spindle-use": 12
+      },
+      "min": {
+        "cpu-count": 1,
+        "disk-count": 1,
+        "disk-size": 1024,
+        "memory-size": 128,
+        "nic-count": 1,
+        "spindle-use": 1
+      },
+      "spindle-ratio": 32.0,
+      "std": {
+        "cpu-count": 1,
+        "disk-count": 1,
+        "disk-size": 1024,
+        "memory-size": 128,
+        "nic-count": 1,
+        "spindle-use": 1
+      },
+      "vcpu-ratio": 1.0
+    },
+    "mac_prefix": "aa:bb:cc",
+    "maintain_node_health": false,
+    "master_ip": "192.0.2.87",
+    "master_netdev": "eth0",
+    "master_netmask": 32,
+    "master_node": "node1.example.com",
+    "modify_etc_hosts": true,
+    "modify_ssh_setup": true,
+    "mtime": 1361964122.79471,
+    "ndparams": {
+      "exclusive_storage": false,
+      "oob_program": "",
+      "spindle_count": 1
+    },
+    "nicparams": {
+      "default": {
+        "link": "br974",
+        "mode": "bridged"
+      }
+    },
+    "os_hvp": {
+      "TEMP-Ganeti-QA-OS": {
+        "xen-hvm": {
+          "acpi": false,
+          "pae": true
+        },
+        "xen-pvm": {
+          "root_path": "/dev/sda5"
+        }
+      }
+    },
+    "osparams": {},
+    "prealloc_wipe_disks": false,
+    "primary_ip_family": 2,
+    "reserved_lvs": [],
+    "rsahostkeypub": "YOURKEY",
+    "serial_no": 3189,
+    "shared_file_storage_dir": "/srv/ganeti/shared-file-storage",
+    "tags": [
+      "mytag"
+    ],
+    "tcpudp_port_pool": [
+      32101,
+      32102,
+      32103,
+      32104,
+      32105
+    ],
+    "uid_pool": [],
+    "use_external_mip_script": false,
+    "uuid": "dddf8c12-f2d8-4718-a35b-7804daf12a3f",
+    "volume_group_name": "xenvg"
+  },
+  "ctime": 1343869045.605523,
+  "instances": {
+    "instance1.example.com": {
+      "admin_state": "up",
+      "beparams": {},
+      "ctime": 1363620258.608976,
+      "disk_template": "drbd",
+      "disks": [
+        {
+          "children": [
+            {
+              "dev_type": "lvm",
+              "logical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_data"
+              ],
+              "params": {},
+              "physical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_data"
+              ],
+              "size": 1024
+            },
+            {
+              "dev_type": "lvm",
+              "logical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_meta"
+              ],
+              "params": {},
+              "physical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_meta"
+              ],
+              "size": 128
+            }
+          ],
+          "dev_type": "drbd8",
+          "iv_name": "disk/0",
+          "logical_id": [
+            "node1.example.com",
+            "node3.example.com",
+            32100,
+            0,
+            0,
+            "d3c3fd475fcbaf5fd177fb245ac43b71247ada38"
+          ],
+          "mode": "rw",
+          "params": {},
+          "physical_id": [
+            "198.51.100.82",
+            32100,
+            "198.51.100.84",
+            32100,
+            0,
+            "d3c3fd475fcbaf5fd177fb245ac43b71247ada38"
+          ],
+          "size": 1024
+        }
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1363620320.874901,
+      "name": "instance1.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:b2:6e:0b",
+          "nicparams": {}
+        }
+      ],
+      "os": "busybox",
+      "osparams": {},
+      "primary_node": "node1.example.com",
+      "serial_no": 2,
+      "uuid": "6c078d22-3eb6-4780-857d-81772e09eef1"
+    },
+    "instance2.example.com": {
+      "admin_state": "up",
+      "beparams": {},
+      "ctime": 1355186880.451181,
+      "disk_template": "plain",
+      "disks": [
+        {
+          "dev_type": "lvm",
+          "iv_name": "disk/0",
+          "logical_id": [
+            "xenvg",
+            "3e559cd7-1024-4294-a923-a9fd13182b2f.disk0"
+          ],
+          "mode": "rw",
+          "params": {},
+          "physical_id": [
+            "xenvg",
+            "3e559cd7-1024-4294-a923-a9fd13182b2f.disk0"
+          ],
+          "size": 102400
+        }
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1355186898.307642,
+      "name": "instance2.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:56:83:fb",
+          "nicparams": {}
+        }
+      ],
+      "os": "debian-image",
+      "osparams": {},
+      "primary_node": "node3.example.com",
+      "serial_no": 2,
+      "tags": [],
+      "uuid": "8fde9f6d-e1f1-4850-9e9c-154966f622f5"
+    },
+    "instance3.example.com": {
+      "admin_state": "up",
+      "beparams": {},
+      "ctime": 1354038435.343601,
+      "disk_template": "plain",
+      "disks": [
+        {
+          "dev_type": "lvm",
+          "iv_name": "disk/0",
+          "logical_id": [
+            "xenvg",
+            "b27a576a-13f7-4f07-885c-63fcad4fdfcc.disk0"
+          ],
+          "mode": "rw",
+          "params": {},
+          "physical_id": [
+            "xenvg",
+            "b27a576a-13f7-4f07-885c-63fcad4fdfcc.disk0"
+          ],
+          "size": 1280
+        }
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1354224585.700732,
+      "name": "instance3.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:5e:5c:75",
+          "nicparams": {}
+        }
+      ],
+      "os": "debian-image",
+      "osparams": {},
+      "primary_node": "node2.example.com",
+      "serial_no": 4,
+      "tags": [],
+      "uuid": "4e091bdc-e205-4ed7-8a47-0c9130a6619f"
+    }
+  },
+  "mtime": 1361984633.373014,
+  "networks": {
+    "99f0128a-1c84-44da-90b9-9581ea00c075": {
+      "ext_reservations": "1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
+      "name": "a network",
+      "network": "203.0.113.0/24",
+      "reservations": "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+      "serial_no": 1,
+      "uuid": "99f0128a-1c84-44da-90b9-9581ea00c075"
+    }
+  },
+  "nodegroups": {
+    "5244a46d-7506-4e14-922d-02b58153dde1": {
+      "alloc_policy": "preferred",
+      "diskparams": {},
+      "ipolicy": {
+        "max": {},
+        "min": {},
+        "std": {}
+      },
+      "mtime": 1361963775.575009,
+      "name": "default",
+      "ndparams": {},
+      "networks": {},
+      "serial_no": 125,
+      "tags": [],
+      "uuid": "5244a46d-7506-4e14-922d-02b58153dde1"
+    },
+    "6c0a8916-b719-45ad-95dd-82192b1e473f": {
+      "alloc_policy": "preferred",
+      "diskparams": {},
+      "ipolicy": {
+       "disk-templates": [
+         "plain"
+       ],
+       "max": {
+         "nic-count": 18,
+         "spindle-use": 14
+       },
+       "min": {
+         "cpu-count": 2,
+         "disk-count": 2
+       },
+       "spindle-ratio": 5.2,
+       "std": {},
+       "vcpu-ratio": 3.14
+      },
+      "mtime": 1361963775.575009,
+      "name": "another",
+      "ndparams": {
+        "exclusive_storage": true
+      },
+      "networks": {},
+      "serial_no": 125,
+      "tags": [],
+      "uuid": "6c0a8916-b719-45ad-95dd-82192b1e473f"
+    }
+  },
+  "nodes": {
+    "node1.example.com": {
+      "ctime": 1349722460.022264,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1359986533.353329,
+      "name": "node1.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.82",
+      "secondary_ip": "198.51.100.82",
+      "serial_no": 197,
+      "tags": [],
+      "uuid": "9a12d554-75c0-4cb1-8064-103365145db0",
+      "vm_capable": true
+    },
+    "node2.example.com": {
+      "ctime": 1343869045.604884,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1358348755.779906,
+      "name": "node2.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.83",
+      "secondary_ip": "198.51.100.83",
+      "serial_no": 6,
+      "tags": [],
+      "uuid": "2ae3d962-2dad-44f2-bdb1-85f77107f907",
+      "vm_capable": true
+    },
+    "node3.example.com": {
+      "ctime": 1343869205.934807,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1353019704.885368,
+      "name": "node3.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.84",
+      "secondary_ip": "198.51.100.84",
+      "serial_no": 2,
+      "tags": [],
+      "uuid": "41f9c238-173c-4120-9e41-04ad379b647a",
+      "vm_capable": true
+    }
+  },
+  "serial_no": 7624,
+  "version": 2070000
+}
diff --git a/test/data/cluster_config_downgraded_2.7.json b/test/data/cluster_config_downgraded_2.7.json
new file mode 100644 (file)
index 0000000..8f3847a
--- /dev/null
@@ -0,0 +1,523 @@
+{
+  "cluster": {
+    "beparams": {
+      "default": {
+        "always_failover": false,
+        "auto_balance": true,
+        "maxmem": 128,
+        "minmem": 128,
+        "spindle_use": 1,
+        "vcpus": 1
+      }
+    },
+    "blacklisted_os": [],
+    "candidate_pool_size": 10,
+    "cluster_name": "cluster.name.example.com",
+    "ctime": 1343869045.604884,
+    "default_iallocator": "hail",
+    "disk_state_static": {},
+    "diskparams": {
+      "blockdev": {},
+      "diskless": {},
+      "drbd": {
+        "c-delay-target": 1,
+        "c-fill-target": 200,
+        "c-max-rate": 2048,
+        "c-min-rate": 1024,
+        "c-plan-ahead": 1,
+        "data-stripes": 2,
+        "disk-barriers": "bf",
+        "disk-custom": "",
+        "dynamic-resync": false,
+        "meta-barriers": true,
+        "meta-stripes": 2,
+        "metavg": "xenvg",
+        "net-custom": "",
+        "resync-rate": 1024
+      },
+      "ext": {},
+      "file": {},
+      "plain": {
+        "stripes": 2
+      },
+      "rbd": {
+        "pool": "rbd"
+      },
+      "sharedfile": {}
+    },
+    "drbd_usermode_helper": "/bin/true",
+    "enabled_hypervisors": [
+      "xen-pvm"
+    ],
+    "file_storage_dir": "",
+    "hidden_os": [],
+    "highest_used_port": 32105,
+    "hv_state_static": {
+      "xen-pvm": {
+        "cpu_node": 1,
+        "cpu_total": 1,
+        "mem_hv": 0,
+        "mem_node": 0,
+        "mem_total": 0
+      }
+    },
+    "hvparams": {
+      "chroot": {
+        "init_script": "/ganeti-chroot"
+      },
+      "fake": {},
+      "kvm": {
+        "acpi": true,
+        "boot_order": "disk",
+        "cdrom2_image_path": "",
+        "cdrom_disk_type": "",
+        "cdrom_image_path": "",
+        "cpu_cores": 0,
+        "cpu_mask": "all",
+        "cpu_sockets": 0,
+        "cpu_threads": 0,
+        "cpu_type": "",
+        "disk_cache": "default",
+        "disk_type": "paravirtual",
+        "floppy_image_path": "",
+        "initrd_path": "",
+        "kernel_args": "ro",
+        "kernel_path": "/boot/vmlinuz-kvmU",
+        "keymap": "",
+        "kvm_extra": "",
+        "kvm_flag": "",
+        "kvm_path": "/usr/bin/kvm",
+        "machine_version": "",
+        "mem_path": "",
+        "migration_bandwidth": 4,
+        "migration_downtime": 30,
+        "migration_mode": "live",
+        "migration_port": 4041,
+        "nic_type": "paravirtual",
+        "reboot_behavior": "reboot",
+        "root_path": "/dev/vda1",
+        "security_domain": "",
+        "security_model": "none",
+        "serial_console": true,
+        "serial_speed": 38400,
+        "soundhw": "",
+        "spice_bind": "",
+        "spice_image_compression": "",
+        "spice_ip_version": 0,
+        "spice_jpeg_wan_compression": "",
+        "spice_password_file": "",
+        "spice_playback_compression": true,
+        "spice_streaming_video": "",
+        "spice_tls_ciphers": "HIGH:-DES:-3DES:-EXPORT:-ADH",
+        "spice_use_tls": false,
+        "spice_use_vdagent": true,
+        "spice_zlib_glz_wan_compression": "",
+        "usb_devices": "",
+        "usb_mouse": "",
+        "use_chroot": false,
+        "use_localtime": false,
+        "vga": "",
+        "vhost_net": false,
+        "vnc_bind_address": "",
+        "vnc_password_file": "",
+        "vnc_tls": false,
+        "vnc_x509_path": "",
+        "vnc_x509_verify": false
+      },
+      "lxc": {
+        "cpu_mask": ""
+      },
+      "xen-hvm": {
+        "acpi": true,
+        "blockdev_prefix": "hd",
+        "boot_order": "cd",
+        "cdrom_image_path": "",
+        "cpu_cap": 0,
+        "cpu_mask": "all",
+        "cpu_weight": 256,
+        "device_model": "/usr/lib/xen/bin/qemu-dm",
+        "disk_type": "paravirtual",
+        "kernel_path": "/usr/lib/xen/boot/hvmloader",
+        "migration_mode": "non-live",
+        "migration_port": 8082,
+        "nic_type": "rtl8139",
+        "pae": true,
+        "pci_pass": "",
+        "reboot_behavior": "reboot",
+        "use_localtime": false,
+        "vnc_bind_address": "0.0.0.0",
+        "vnc_password_file": "/your/vnc-cluster-password"
+      },
+      "xen-pvm": {
+        "blockdev_prefix": "sd",
+        "bootloader_args": "",
+        "bootloader_path": "",
+        "cpu_cap": 0,
+        "cpu_mask": "all",
+        "cpu_weight": 256,
+        "initrd_path": "",
+        "kernel_args": "ro",
+        "kernel_path": "/boot/vmlinuz-xenU",
+        "migration_mode": "live",
+        "migration_port": 8082,
+        "reboot_behavior": "reboot",
+        "root_path": "/dev/xvda1",
+        "use_bootloader": false
+      }
+    },
+    "ipolicy": {
+      "disk-templates": [
+        "sharedfile",
+        "diskless",
+        "plain",
+        "blockdev",
+        "drbd",
+        "file",
+        "rbd"
+      ],
+      "max": {
+        "cpu-count": 8,
+        "disk-count": 16,
+        "disk-size": 1048576,
+        "memory-size": 32768,
+        "nic-count": 8,
+        "spindle-use": 12
+      },
+      "min": {
+        "cpu-count": 1,
+        "disk-count": 1,
+        "disk-size": 1024,
+        "memory-size": 128,
+        "nic-count": 1,
+        "spindle-use": 1
+      },
+      "spindle-ratio": 32.0,
+      "std": {
+        "cpu-count": 1,
+        "disk-count": 1,
+        "disk-size": 1024,
+        "memory-size": 128,
+        "nic-count": 1,
+        "spindle-use": 1
+      },
+      "vcpu-ratio": 1.0
+    },
+    "mac_prefix": "aa:bb:cc",
+    "maintain_node_health": false,
+    "master_ip": "192.0.2.87",
+    "master_netdev": "eth0",
+    "master_netmask": 32,
+    "master_node": "node1.example.com",
+    "modify_etc_hosts": true,
+    "modify_ssh_setup": true,
+    "mtime": 1361964122.79471,
+    "ndparams": {
+      "exclusive_storage": false,
+      "oob_program": "",
+      "spindle_count": 1
+    },
+    "nicparams": {
+      "default": {
+        "link": "br974",
+        "mode": "bridged"
+      }
+    },
+    "os_hvp": {
+      "TEMP-Ganeti-QA-OS": {
+        "xen-hvm": {
+          "acpi": false,
+          "pae": true
+        },
+        "xen-pvm": {
+          "root_path": "/dev/sda5"
+        }
+      }
+    },
+    "osparams": {},
+    "prealloc_wipe_disks": false,
+    "primary_ip_family": 2,
+    "reserved_lvs": [],
+    "rsahostkeypub": "YOURKEY",
+    "serial_no": 3189,
+    "shared_file_storage_dir": "/srv/ganeti/shared-file-storage",
+    "tags": [
+      "mytag"
+    ],
+    "tcpudp_port_pool": [
+      32101,
+      32102,
+      32103,
+      32104,
+      32105
+    ],
+    "uid_pool": [],
+    "use_external_mip_script": false,
+    "uuid": "dddf8c12-f2d8-4718-a35b-7804daf12a3f",
+    "volume_group_name": "xenvg"
+  },
+  "ctime": 1343869045.605523,
+  "instances": {
+    "instance1.example.com": {
+      "admin_state": "up",
+      "beparams": {},
+      "ctime": 1363620258.608976,
+      "disk_template": "drbd",
+      "disks": [
+        {
+          "children": [
+            {
+              "dev_type": "lvm",
+              "logical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_data"
+              ],
+              "params": {},
+              "physical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_data"
+              ],
+              "size": 1024
+            },
+            {
+              "dev_type": "lvm",
+              "logical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_meta"
+              ],
+              "params": {},
+              "physical_id": [
+                "xenvg",
+                "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_meta"
+              ],
+              "size": 128
+            }
+          ],
+          "dev_type": "drbd8",
+          "iv_name": "disk/0",
+          "logical_id": [
+            "node1.example.com",
+            "node3.example.com",
+            32100,
+            0,
+            0,
+            "d3c3fd475fcbaf5fd177fb245ac43b71247ada38"
+          ],
+          "mode": "rw",
+          "params": {},
+          "physical_id": [
+            "198.51.100.82",
+            32100,
+            "198.51.100.84",
+            32100,
+            0,
+            "d3c3fd475fcbaf5fd177fb245ac43b71247ada38"
+          ],
+          "size": 1024
+        }
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1363620320.874901,
+      "name": "instance1.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:b2:6e:0b",
+          "nicparams": {}
+        }
+      ],
+      "os": "busybox",
+      "osparams": {},
+      "primary_node": "node1.example.com",
+      "serial_no": 2,
+      "uuid": "6c078d22-3eb6-4780-857d-81772e09eef1"
+    },
+    "instance2.example.com": {
+      "admin_state": "up",
+      "beparams": {},
+      "ctime": 1355186880.451181,
+      "disk_template": "plain",
+      "disks": [
+        {
+          "dev_type": "lvm",
+          "iv_name": "disk/0",
+          "logical_id": [
+            "xenvg",
+            "3e559cd7-1024-4294-a923-a9fd13182b2f.disk0"
+          ],
+          "mode": "rw",
+          "params": {},
+          "physical_id": [
+            "xenvg",
+            "3e559cd7-1024-4294-a923-a9fd13182b2f.disk0"
+          ],
+          "size": 102400
+        }
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1355186898.307642,
+      "name": "instance2.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:56:83:fb",
+          "nicparams": {}
+        }
+      ],
+      "os": "debian-image",
+      "osparams": {},
+      "primary_node": "node3.example.com",
+      "serial_no": 2,
+      "tags": [],
+      "uuid": "8fde9f6d-e1f1-4850-9e9c-154966f622f5"
+    },
+    "instance3.example.com": {
+      "admin_state": "up",
+      "beparams": {},
+      "ctime": 1354038435.343601,
+      "disk_template": "plain",
+      "disks": [
+        {
+          "dev_type": "lvm",
+          "iv_name": "disk/0",
+          "logical_id": [
+            "xenvg",
+            "b27a576a-13f7-4f07-885c-63fcad4fdfcc.disk0"
+          ],
+          "mode": "rw",
+          "params": {},
+          "physical_id": [
+            "xenvg",
+            "b27a576a-13f7-4f07-885c-63fcad4fdfcc.disk0"
+          ],
+          "size": 1280
+        }
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1354224585.700732,
+      "name": "instance3.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:5e:5c:75",
+          "nicparams": {}
+        }
+      ],
+      "os": "debian-image",
+      "osparams": {},
+      "primary_node": "node2.example.com",
+      "serial_no": 4,
+      "tags": [],
+      "uuid": "4e091bdc-e205-4ed7-8a47-0c9130a6619f"
+    }
+  },
+  "mtime": 1361984633.373014,
+  "networks": {
+    "99f0128a-1c84-44da-90b9-9581ea00c075": {
+      "ext_reservations": "1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
+      "name": "a network",
+      "network": "203.0.113.0/24",
+      "reservations": "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+      "serial_no": 1,
+      "uuid": "99f0128a-1c84-44da-90b9-9581ea00c075"
+    }
+  },
+  "nodegroups": {
+    "5244a46d-7506-4e14-922d-02b58153dde1": {
+      "alloc_policy": "preferred",
+      "diskparams": {},
+      "ipolicy": {
+        "max": {},
+        "min": {},
+        "std": {}
+      },
+      "mtime": 1361963775.575009,
+      "name": "default",
+      "ndparams": {},
+      "networks": {},
+      "serial_no": 125,
+      "tags": [],
+      "uuid": "5244a46d-7506-4e14-922d-02b58153dde1"
+    },
+    "6c0a8916-b719-45ad-95dd-82192b1e473f": {
+      "alloc_policy": "preferred",
+      "diskparams": {},
+      "ipolicy": {
+       "disk-templates": [
+         "plain"
+       ],
+       "max": {},
+       "min": {},
+       "spindle-ratio": 5.2,
+       "std": {},
+       "vcpu-ratio": 3.14
+      },
+      "mtime": 1361963775.575009,
+      "name": "another",
+      "ndparams": {
+        "exclusive_storage": true
+      },
+      "networks": {},
+      "serial_no": 125,
+      "tags": [],
+      "uuid": "6c0a8916-b719-45ad-95dd-82192b1e473f"
+    }
+  },
+  "nodes": {
+    "node1.example.com": {
+      "ctime": 1349722460.022264,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1359986533.353329,
+      "name": "node1.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.82",
+      "secondary_ip": "198.51.100.82",
+      "serial_no": 197,
+      "tags": [],
+      "uuid": "9a12d554-75c0-4cb1-8064-103365145db0",
+      "vm_capable": true
+    },
+    "node2.example.com": {
+      "ctime": 1343869045.604884,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1358348755.779906,
+      "name": "node2.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.83",
+      "secondary_ip": "198.51.100.83",
+      "serial_no": 6,
+      "tags": [],
+      "uuid": "2ae3d962-2dad-44f2-bdb1-85f77107f907",
+      "vm_capable": true
+    },
+    "node3.example.com": {
+      "ctime": 1343869205.934807,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1353019704.885368,
+      "name": "node3.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.84",
+      "secondary_ip": "198.51.100.84",
+      "serial_no": 2,
+      "tags": [],
+      "uuid": "41f9c238-173c-4120-9e41-04ad379b647a",
+      "vm_capable": true
+    }
+  },
+  "serial_no": 7624,
+  "version": 2070000
+}
index 72762fa..28ebdf1 100644 (file)
@@ -11,5 +11,5 @@ new-3|128|1024|1|running|Y|node-01-003||diskless||1
 new-4|128|1024|1|running|Y|node-01-002||diskless||1
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
-group-01|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
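Note: the change above (and in the other .data files below) merges the former separate min and max columns of the htools text-format ipolicy line into a single column that holds one or more spec pairs, written as semicolon-separated comma lists. A rough sketch of how such a line decomposes; the field order is inferred by matching the numbers against the cluster ipolicy in the JSON config earlier in this diff, so treat the key names as an assumption (the authoritative description is the text-format documentation updated elsewhere in this merge):

ISPEC_KEYS = ["memory-size", "cpu-count", "disk-size", "disk-count",
              "nic-count", "spindle-use"]

def parse_ispec(text):
    # one comma-separated spec, e.g. "128,1,1024,1,1,1"
    return dict(zip(ISPEC_KEYS, [int(x) for x in text.split(",")]))

def parse_ipolicy_line(line):
    owner, std, minmax, dts, vcpu_ratio, spindle_ratio = line.split("|")
    specs = [parse_ispec(s) for s in minmax.split(";")]
    # consecutive specs form (min, max) pairs; more than one pair may be present
    pairs = [{"min": lo, "max": hi} for lo, hi in zip(specs[0::2], specs[1::2])]
    return {"owner": owner,
            "std": parse_ispec(std),
            "minmax": pairs,
            "disk-templates": dts.split(","),
            "vcpu-ratio": float(vcpu_ratio),
            "spindle-ratio": float(spindle_ratio)}

line = ("group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|"
        "diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0")
print(parse_ipolicy_line(line)["minmax"])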
index f7410ae..1e34c94 100644 (file)
@@ -6,5 +6,5 @@ node2.example.com|1024|0|896|95367|94343|4|N|fake-uuid-01|1
 instance1.example.com|128|1024|1|running|Y|node2.example.com||plain|
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,1|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
-default|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,1|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,1|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+default|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,1|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
index d25526f..9e17bbb 100644 (file)
@@ -3,5 +3,5 @@ group-01|fake-uuid-01|preferred|
 
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
-group-01|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
index f650fbe..57e20c3 100644 (file)
           "cpu-count": 1,
           "spindle-use": 1
         },
-        "minmax": {
-         "min": {
-           "nic-count": 1,
-           "disk-size": 128,
-           "disk-count": 1,
-           "memory-size": 128,
-           "cpu-count": 1,
-           "spindle-use": 1
-         },
-         "max": {
-           "nic-count": 8,
-           "disk-size": 1048576,
-           "disk-count": 16,
-           "memory-size": 32768,
-           "cpu-count": 8,
-           "spindle-use": 8
-         }
-        },
+        "minmax": [
+          {
+            "min": {
+              "nic-count": 1,
+              "disk-size": 128,
+              "disk-count": 1,
+              "memory-size": 128,
+              "cpu-count": 1,
+              "spindle-use": 1
+            },
+            "max": {
+              "nic-count": 8,
+              "disk-size": 1048576,
+              "disk-count": 16,
+              "memory-size": 32768,
+              "cpu-count": 8,
+              "spindle-use": 8
+            }
+          }
+        ],
         "vcpu-ratio": 4.0,
         "disk-templates": [
           "sharedfile",
       "disk-count": 1,
       "spindle-use": 1
     },
-    "min": {
-      "nic-count": 1,
-      "disk-size": 1024,
-      "memory-size": 128,
-      "cpu-count": 1,
-      "disk-count": 1,
-      "spindle-use": 1
-    },
-    "max": {
-      "nic-count": 8,
-      "disk-size": 1048576,
-      "memory-size": 32768,
-      "cpu-count": 8,
-      "disk-count": 16,
-      "spindle-use": 8
-    },
+    "minmax": [
+      {
+        "min": {
+          "nic-count": 1,
+          "disk-size": 1024,
+          "memory-size": 128,
+          "cpu-count": 1,
+          "disk-count": 1,
+          "spindle-use": 1
+        },
+        "max": {
+          "nic-count": 8,
+          "disk-size": 1048576,
+          "memory-size": 32768,
+          "cpu-count": 8,
+          "disk-count": 16,
+          "spindle-use": 8
+        }
+      }
+    ],
     "vcpu-ratio": 4.0,
     "disk-templates": [
       "sharedfile",
index 790de2a..4f233c7 100644 (file)
         "disk-templates": [
           "file"
         ],
-        "minmax" : {
-          "max": {
-            "cpu-count": 2,
-            "disk-count": 8,
-            "disk-size": 2048,
-            "memory-size": 12800,
-            "nic-count": 8,
-            "spindle-use": 8
-          },
-          "min": {
-            "cpu-count": 1,
-            "disk-count": 1,
-            "disk-size": 1024,
-            "memory-size": 128,
-            "nic-count": 1,
-            "spindle-use": 1
+        "minmax" : [
+          {
+            "max": {
+              "cpu-count": 2,
+              "disk-count": 8,
+              "disk-size": 2048,
+              "memory-size": 12800,
+              "nic-count": 8,
+              "spindle-use": 8
+            },
+            "min": {
+              "cpu-count": 1,
+              "disk-count": 1,
+              "disk-size": 1024,
+              "memory-size": 128,
+              "nic-count": 1,
+              "spindle-use": 1
+            }
           }
-        },
+        ],
         "spindle-ratio": 32.0,
         "std": {
           "cpu-count": 1,
index 2a4f636..b4e7280 100644 (file)
         "disk-templates": [
           "file"
         ],
-        "minmax": {
-          "max": {
-            "cpu-count": 2,
-            "disk-count": 8,
-            "disk-size": 2048,
-            "memory-size": 12800,
-            "nic-count": 8,
-            "spindle-use": 8
-          },
-          "min": {
-            "cpu-count": 1,
-            "disk-count": 1,
-            "disk-size": 1024,
-            "memory-size": 128,
-            "nic-count": 1,
-            "spindle-use": 1
+        "minmax": [
+          {
+            "max": {
+              "cpu-count": 2,
+              "disk-count": 8,
+              "disk-size": 2048,
+              "memory-size": 12800,
+              "nic-count": 8,
+              "spindle-use": 8
+            },
+            "min": {
+              "cpu-count": 1,
+              "disk-count": 1,
+              "disk-size": 1024,
+              "memory-size": 128,
+              "nic-count": 1,
+              "spindle-use": 1
+            }
           }
-        },
+        ],
         "spindle-ratio": 32.0,
         "std": {
           "cpu-count": 1,
index f698638..8cca142 100644 (file)
           "cpu-count": 1,
           "spindle-use": 1
         },
-        "minmax": {
-         "min": {
-           "nic-count": 1,
-           "disk-size": 128,
-           "disk-count": 1,
-           "memory-size": 128,
-           "cpu-count": 1,
-           "spindle-use": 1
-         },
-         "max": {
-           "nic-count": 8,
-           "disk-size": 1048576,
-           "disk-count": 16,
-           "memory-size": 32768,
-           "cpu-count": 8,
-           "spindle-use": 8
-         }
-        },
+        "minmax": [
+          {
+            "min": {
+              "nic-count": 1,
+              "disk-size": 128,
+              "disk-count": 1,
+              "memory-size": 128,
+              "cpu-count": 1,
+              "spindle-use": 1
+            },
+            "max": {
+              "nic-count": 8,
+              "disk-size": 1048576,
+              "disk-count": 16,
+              "memory-size": 32768,
+              "cpu-count": 8,
+              "spindle-use": 8
+            }
+          }
+        ],
         "vcpu-ratio": 4.0,
         "disk-templates": [
           "sharedfile",
           "cpu-count": 1,
           "spindle-use": 1
         },
-        "minmax": {
-         "min": {
-           "nic-count": 1,
-           "disk-size": 128,
-           "disk-count": 1,
-           "memory-size": 128,
-           "cpu-count": 1,
-           "spindle-use": 1
-         },
-         "max": {
-           "nic-count": 8,
-           "disk-size": 1048576,
-           "disk-count": 16,
-           "memory-size": 32768,
-           "cpu-count": 8,
-           "spindle-use": 8
-         }
-        },
+        "minmax": [
+          {
+            "min": {
+              "nic-count": 1,
+              "disk-size": 128,
+              "disk-count": 1,
+              "memory-size": 128,
+              "cpu-count": 1,
+              "spindle-use": 1
+            },
+            "max": {
+              "nic-count": 8,
+              "disk-size": 1048576,
+              "disk-count": 16,
+              "memory-size": 32768,
+              "cpu-count": 8,
+              "spindle-use": 8
+            }
+          }
+        ],
         "vcpu-ratio": 4.0,
         "disk-templates": [
           "sharedfile",
       "disk-count": 1,
       "spindle-use": 1
     },
-    "minmax": {
-      "min": {
-       "nic-count": 1,
-       "disk-size": 1024,
-       "memory-size": 128,
-       "cpu-count": 1,
-       "disk-count": 1,
-       "spindle-use": 1
-      },
-      "max": {
-       "nic-count": 8,
-       "disk-size": 1048576,
-       "memory-size": 32768,
-       "cpu-count": 8,
-       "disk-count": 16,
-       "spindle-use": 8
+    "minmax": [
+      {
+        "min": {
+          "nic-count": 1,
+          "disk-size": 1024,
+          "memory-size": 128,
+          "cpu-count": 1,
+          "disk-count": 1,
+          "spindle-use": 1
+        },
+        "max": {
+          "nic-count": 8,
+          "disk-size": 1048576,
+          "memory-size": 32768,
+          "cpu-count": 8,
+          "disk-count": 16,
+          "spindle-use": 8
+        }
       }
-    },
+    ],
     "vcpu-ratio": 4.0,
     "disk-templates": [
       "sharedfile",
index 8b80e25..8fed477 100644 (file)
           "cpu-count": 1,
           "spindle-use": 1
         },
-        "minmax": {
-         "min": {
-           "nic-count": 1,
-           "disk-size": 128,
-           "disk-count": 1,
-           "memory-size": 128,
-           "cpu-count": 1,
-           "spindle-use": 1
-         },
-         "max": {
-           "nic-count": 8,
-           "disk-size": 1048576,
-           "disk-count": 16,
-           "memory-size": 32768,
-           "cpu-count": 8,
-           "spindle-use": 8
-         }
-        },
+        "minmax": [
+          {
+            "min": {
+              "nic-count": 1,
+              "disk-size": 128,
+              "disk-count": 1,
+              "memory-size": 128,
+              "cpu-count": 1,
+              "spindle-use": 1
+            },
+            "max": {
+              "nic-count": 8,
+              "disk-size": 1048576,
+              "disk-count": 16,
+              "memory-size": 32768,
+              "cpu-count": 8,
+              "spindle-use": 8
+            }
+          }
+        ],
         "vcpu-ratio": 4.0,
         "disk-templates": [
           "sharedfile",
index ce66041..944700e 100644 (file)
           "cpu-count": 1,
           "spindle-use": 1
         },
-        "minmax": {
-         "min": {
-           "nic-count": 1,
-           "disk-size": 128,
-           "disk-count": 1,
-           "memory-size": 128,
-           "cpu-count": 1,
-           "spindle-use": 1
-         },
-         "max": {
-           "nic-count": 8,
-           "disk-size": 1048576,
-           "disk-count": 16,
-           "memory-size": 32768,
-           "cpu-count": 8,
-           "spindle-use": 8
-         }
-        },
+        "minmax": [
+          {
+            "min": {
+              "nic-count": 1,
+              "disk-size": 128,
+              "disk-count": 1,
+              "memory-size": 128,
+              "cpu-count": 1,
+              "spindle-use": 1
+            },
+            "max": {
+              "nic-count": 8,
+              "disk-size": 1048576,
+              "disk-count": 16,
+              "memory-size": 32768,
+              "cpu-count": 8,
+              "spindle-use": 8
+            }
+          }
+        ],
         "vcpu-ratio": 4.0,
         "disk-templates": [
           "sharedfile",
       "disk-count": 1,
       "spindle-use": 1
     },
-    "min": {
-      "nic-count": 1,
-      "disk-size": 1024,
-      "memory-size": 128,
-      "cpu-count": 1,
-      "disk-count": 1,
-      "spindle-use": 1
-    },
-    "max": {
-      "nic-count": 8,
-      "disk-size": 1048576,
-      "memory-size": 32768,
-      "cpu-count": 8,
-      "disk-count": 16,
-      "spindle-use": 8
-    },
+    "minmax": [
+      {
+       "min": {
+         "nic-count": 1,
+         "disk-size": 1024,
+         "memory-size": 128,
+         "cpu-count": 1,
+         "disk-count": 1,
+         "spindle-use": 1
+       },
+       "max": {
+         "nic-count": 8,
+         "disk-size": 1048576,
+         "memory-size": 32768,
+         "cpu-count": 8,
+         "disk-count": 16,
+         "spindle-use": 8
+       }
+      }
+    ],
     "vcpu-ratio": 4.0,
     "disk-templates": [
       "sharedfile",
index 5c5761f..fe3bb22 100644 (file)
@@ -140,6 +140,6 @@ new-126|128|1024|1|running|Y|node-01-002|node-01-004|drbd||1
 new-127|128|1024|1|running|Y|node-01-001|node-01-003|drbd||1
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
-group-01|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
-group-02|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+group-02|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
diff --git a/test/data/htools/hspace-tiered-dualspec.data b/test/data/htools/hspace-tiered-dualspec.data
new file mode 100644 (file)
index 0000000..46e44ce
--- /dev/null
@@ -0,0 +1,11 @@
+group-01|fake-uuid-01|preferred|
+
+node-01-001|262144|65536|196608|2097152|2097152|8|N|fake-uuid-01|1
+node-01-002|262144|65536|196608|2097152|2097152|8|N|fake-uuid-01|1
+node-01-003|262144|1024|261120|2097152|2097152|8|N|fake-uuid-01|1
+node-01-004|262144|1024|261120|2097152|2097152|8|N|fake-uuid-01|1
+
+
+
+|63488,2,522240,1,1,1|129024,4,1047552,1,1,1;131072,4,1048576,16,8,12;63488,2,522240,1,1,1;65536,2,524288,16,8,12|plain,diskless,file,sharedfile,blockdev,drbd,rbd,ext|4.0|32.0
+group-01|63488,2,522240,1,1,1|129024,4,1047552,1,1,1;131072,4,1048576,16,8,12;63488,2,522240,1,1,1;65536,2,524288,16,8,12|plain,diskless,file,sharedfile,blockdev,drbd,rbd,ext|4.0|32.0
diff --git a/test/data/htools/hspace-tiered.data b/test/data/htools/hspace-tiered.data
new file mode 100644 (file)
index 0000000..61ee4c1
--- /dev/null
@@ -0,0 +1,11 @@
+group-01|fake-uuid-01|preferred|
+
+node-01-001|262144|65536|196608|2097152|2097152|8|N|fake-uuid-01|1
+node-01-002|262144|65536|196608|2097152|2097152|8|N|fake-uuid-01|1
+node-01-003|262144|1024|261120|2097152|2097152|8|N|fake-uuid-01|1
+node-01-004|262144|1024|261120|2097152|2097152|8|N|fake-uuid-01|1
+
+
+
+|129024,4,1047552,1,1,1|129024,4,1047552,1,1,1;131072,4,1048576,16,8,12|plain,diskless,file,sharedfile,blockdev,drbd,rbd,ext|4.0|32.0
+group-01|129024,4,1047552,1,1,1|129024,4,1047552,1,1,1;131072,4,1048576,16,8,12|plain,diskless,file,sharedfile,blockdev,drbd,rbd,ext|4.0|32.0
index f85223e..9655f72 100644 (file)
@@ -6,5 +6,5 @@ node-01-002|1024|0|896|95367|94343|4|N|fake-uuid-01|1
 new-0|128|1024|1|running|Y|no-such-node||plain|
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
-group-01|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
index b12eb4f..500576a 100644 (file)
@@ -5,5 +5,5 @@ node2|1024|0|0|95367|0|4|N|fake-uuid-01|1
 
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
-default|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
+default|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,8|diskless,file,sharedfile,plain,blockdev,drbd,rbd|4.0|32.0
index 35aa1b7..5d4fc52 100644 (file)
@@ -6,5 +6,5 @@ node-01-003|91552|0|91296|953674|953674|16|M|fake-uuid-01|1
 
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
-group-01|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
index 8a90223..5218750 100644 (file)
@@ -20,6 +20,6 @@ new-6|128|1024|1|running|Y|node-01-004|node-01-002|drbd||1
 new-7|128|1024|1|running|Y|node-01-001|node-01-003|drbd||1
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
-group-01|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
-group-02|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+group-02|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
index dab6569..8ac08cb 100644 (file)
         "disk-count": 1,
         "spindle-use": 1
       },
-      "minmax": {
-       "min": {
-         "cpu-count": 1,
-         "nic-count": 1,
-         "disk-size": 1024,
-         "memory-size": 128,
-         "disk-count": 1,
-         "spindle-use": 1
-       },
-       "max": {
-         "cpu-count": 8,
-         "nic-count": 8,
-         "disk-size": 1048576,
-         "memory-size": 32768,
-         "disk-count": 16,
-         "spindle-use": 8
-       }
-      },
+      "minmax": [
+        {
+          "min": {
+            "cpu-count": 1,
+            "nic-count": 1,
+            "disk-size": 1024,
+            "memory-size": 128,
+            "disk-count": 1,
+            "spindle-use": 1
+          },
+          "max": {
+            "cpu-count": 8,
+            "nic-count": 8,
+            "disk-size": 1048576,
+            "memory-size": 32768,
+            "disk-count": 16,
+            "spindle-use": 8
+          }
+        }
+      ],
       "vcpu-ratio": 4.0,
       "disk-templates": [
         "sharedfile",
index a37d4e5..20fc0af 100644 (file)
       "cpu-count": 1,
       "spindle-use": 1
     },
-    "minmax": {
-      "min": {
-       "nic-count": 1,
-       "disk-size": 128,
-       "disk-count": 1,
-       "memory-size": 128,
-       "cpu-count": 1,
-       "spindle-use": 1
-      },
-      "max": {
-       "nic-count": 8,
-       "disk-size": 1048576,
-       "disk-count": 16,
-       "memory-size": 32768,
-       "cpu-count": 8,
-       "spindle-use": 8
+    "minmax": [
+      {
+        "min": {
+          "nic-count": 1,
+          "disk-size": 128,
+          "disk-count": 1,
+          "memory-size": 128,
+          "cpu-count": 1,
+          "spindle-use": 1
+        },
+        "max": {
+          "nic-count": 8,
+          "disk-size": 1048576,
+          "disk-count": 16,
+          "memory-size": 32768,
+          "cpu-count": 8,
+          "spindle-use": 8
+        }
       }
-    },
+    ],
     "vcpu-ratio": 4.0,
     "disk-templates": [
       "sharedfile",
index d4261c9..a756a05 100644 (file)
@@ -8,5 +8,5 @@ new-0|128|1152|1|running|Y|node-01-001|node-01-002|drbd||1
 new-1|128|1152|1|running|Y|node-01-002|node-01-003|drbd||1
 
 
-|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
-group-01|128,1,1024,1,1,1|128,1,1024,1,1,1|32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
+group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
index 5763fa0..088a53b 100644 (file)
@@ -7,7 +7,7 @@
 
 {-
 
-Copyright (C) 2009, 2010, 2011, 2012 Google Inc.
+Copyright (C) 2009, 2010, 2011, 2012, 2013 Google Inc.
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -158,6 +158,13 @@ prop_ISpecIdempotent ispec =
     Bad msg -> failTest $ "Failed to load ispec: " ++ msg
     Ok ispec' -> ispec ==? ispec'
 
+prop_MultipleMinMaxISpecsIdempotent :: [Types.MinMaxISpecs] -> Property
+prop_MultipleMinMaxISpecsIdempotent minmaxes =
+  case Text.loadMultipleMinMaxISpecs "dummy" . Utils.sepSplit ';' .
+       Text.serializeMultipleMinMaxISpecs $ minmaxes of
+    Bad msg -> failTest $ "Failed to load min/max ispecs: " ++ msg
+    Ok minmaxes' -> minmaxes ==? minmaxes'
+
 prop_IPolicyIdempotent :: Types.IPolicy -> Property
 prop_IPolicyIdempotent ipol =
   case Text.loadIPolicy . Utils.sepSplit '|' $
@@ -210,6 +217,7 @@ testSuite "HTools/Backend/Text"
             , 'prop_Load_NodeFail
             , 'prop_NodeLSIdempotent
             , 'prop_ISpecIdempotent
+            , 'prop_MultipleMinMaxISpecsIdempotent
             , 'prop_IPolicyIdempotent
             , 'prop_CreateSerialise
             ]
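Note: prop_MultipleMinMaxISpecsIdempotent checks that serializing a list of min/max spec pairs and loading it back is the identity. A rough Python analogue of the same round-trip, purely illustrative; the key names and the ";" layout are taken from the test data above, not from the htools code:

SPEC_KEYS = ["memory-size", "cpu-count", "disk-size", "disk-count",
             "nic-count", "spindle-use"]

def serialize_minmax(pairs):
    chunks = []
    for pair in pairs:
        for bound in ("min", "max"):
            chunks.append(",".join(str(pair[bound][k]) for k in SPEC_KEYS))
    return ";".join(chunks)

def load_minmax(text):
    specs = [dict(zip(SPEC_KEYS, [int(v) for v in chunk.split(",")]))
             for chunk in text.split(";")]
    return [{"min": lo, "max": hi} for lo, hi in zip(specs[0::2], specs[1::2])]

pairs = [{"min": dict.fromkeys(SPEC_KEYS, 1), "max": dict.fromkeys(SPEC_KEYS, 8)},
         {"min": dict.fromkeys(SPEC_KEYS, 2), "max": dict.fromkeys(SPEC_KEYS, 16)}]
assert load_minmax(serialize_minmax(pairs)) == pairs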
index 3980edb..1379878 100644 (file)
@@ -42,6 +42,7 @@ import Test.HUnit
 
 import Control.Applicative
 import Data.List (sort)
+import Control.Monad (replicateM)
 
 import Test.Ganeti.TestHelper
 import Test.Ganeti.TestCommon
@@ -101,19 +102,45 @@ genBiggerISpec imin = do
                      , Types.iSpecSpindleUse = fromIntegral su
                      }
 
+genMinMaxISpecs :: Gen Types.MinMaxISpecs
+genMinMaxISpecs = do
+  imin <- arbitrary
+  imax <- genBiggerISpec imin
+  return Types.MinMaxISpecs { Types.minMaxISpecsMinSpec = imin
+                             , Types.minMaxISpecsMaxSpec = imax
+                             }
+
+instance Arbitrary Types.MinMaxISpecs where
+  arbitrary = genMinMaxISpecs
+
+genMinMaxStdISpecs :: Gen (Types.MinMaxISpecs, Types.ISpec)
+genMinMaxStdISpecs = do
+  imin <- arbitrary
+  istd <- genBiggerISpec imin
+  imax <- genBiggerISpec istd
+  return (Types.MinMaxISpecs { Types.minMaxISpecsMinSpec = imin
+                             , Types.minMaxISpecsMaxSpec = imax
+                             },
+          istd)
+
+genIPolicySpecs :: Gen ([Types.MinMaxISpecs], Types.ISpec)
+genIPolicySpecs = do
+  num_mm <- choose (1, 6) -- 6 is just an arbitrary limit
+  std_compl <- choose (1, num_mm)
+  mm_head <- replicateM (std_compl - 1) genMinMaxISpecs
+  (mm_middle, istd) <- genMinMaxStdISpecs
+  mm_tail <- replicateM (num_mm - std_compl) genMinMaxISpecs
+  return (mm_head ++ (mm_middle : mm_tail), istd)
+
+
 instance Arbitrary Types.IPolicy where
   arbitrary = do
-    imin <- arbitrary
-    istd <- genBiggerISpec imin
-    imax <- genBiggerISpec istd
+    (iminmax, istd) <- genIPolicySpecs
     num_tmpl <- choose (0, length allDiskTemplates)
     dts  <- genUniquesList num_tmpl arbitrary
     vcpu_ratio <- choose (1.0, maxVcpuRatio)
     spindle_ratio <- choose (1.0, maxSpindleRatio)
-    return Types.IPolicy { Types.iPolicyMinMaxISpecs = Types.MinMaxISpecs
-                           { Types.minMaxISpecsMinSpec = imin
-                           , Types.minMaxISpecsMaxSpec = imax
-                           }
+    return Types.IPolicy { Types.iPolicyMinMaxISpecs = iminmax
                          , Types.iPolicyStdSpec = istd
                          , Types.iPolicyDiskTemplates = dts
                          , Types.iPolicyVcpuRatio = vcpu_ratio
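Note: genIPolicySpecs builds between one and six min/max pairs plus a std spec that is guaranteed to lie inside one of them, so the generated policy is internally consistent. The same idea sketched in Python, for illustration only:

import random

SPEC_KEYS = ("memory-size", "cpu-count", "disk-size",
             "disk-count", "nic-count", "spindle-use")

def gen_spec(lo=1, hi=1 << 20):
    return {k: random.randint(lo, hi) for k in SPEC_KEYS}

def gen_bigger(spec):
    # every parameter at least as large as in the given spec
    return {k: random.randint(v, v * 4) for k, v in spec.items()}

def gen_policy_specs():
    num_mm = random.randint(1, 6)          # arbitrary upper limit, as above
    std_slot = random.randint(1, num_mm)   # the pair the std spec must fit into
    pairs, std = [], None
    for i in range(1, num_mm + 1):
        lo = gen_spec()
        if i == std_slot:
            std = gen_bigger(lo)
            hi = gen_bigger(std)
        else:
            hi = gen_bigger(lo)
        pairs.append({"min": lo, "max": hi})
    return pairs, std

pairs, std = gen_policy_specs()
assert any(all(p["min"][k] <= std[k] <= p["max"][k] for k in std) for p in pairs)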
index 81bf3fd..23ea054 100644 (file)
@@ -143,14 +143,13 @@ genInstWithNets nets = do
 -- | FIXME: This generates completely random data, without normal
 -- validation rules.
 $(genArbitrary ''PartialISpecParams)
-$(genArbitrary ''PartialMinMaxISpecs)
 
 -- | FIXME: This generates completely random data, without normal
 -- validation rules.
 $(genArbitrary ''PartialIPolicy)
 
 $(genArbitrary ''FilledISpecParams)
-$(genArbitrary ''FilledMinMaxISpecs)
+$(genArbitrary ''MinMaxISpecs)
 $(genArbitrary ''FilledIPolicy)
 $(genArbitrary ''IpFamily)
 $(genArbitrary ''FilledNDParams)
index 3b0ac6a..961d7cd 100644 (file)
@@ -52,7 +52,7 @@ import qualified Ganeti.HTools.Types as Types
 -- | Null iPolicy, and by null we mean very liberal.
 nullIPolicy :: Types.IPolicy
 nullIPolicy = Types.IPolicy
-  { Types.iPolicyMinMaxISpecs = Types.MinMaxISpecs
+  { Types.iPolicyMinMaxISpecs = [Types.MinMaxISpecs
     { Types.minMaxISpecsMinSpec = Types.ISpec { Types.iSpecMemorySize = 0
                                               , Types.iSpecCpuCount   = 0
                                               , Types.iSpecDiskSize   = 0
@@ -68,7 +68,7 @@ nullIPolicy = Types.IPolicy
       , Types.iSpecNicCount   = C.maxNics
       , Types.iSpecSpindleUse = maxBound
       }
-    }
+    }]
   , Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
                                        , Types.iSpecCpuCount   = Types.unitCpu
                                        , Types.iSpecDiskSize   = Types.unitDsk
index 553c98d..43f7ea0 100644 (file)
@@ -6,3 +6,12 @@
 # test again via a file and shell parsing
 ./test/hs/hspace --simu p,4,8T,64g,16 --machine-readable --disk-template drbd -l 8 > $T/capacity && sh -c ". $T/capacity && test x\$HTS_OK = x1"
 >>>= 0
+
+# standard & tiered allocation, using shell parsing to do multiple checks
+./test/hs/hspace --machine-readable -t $TESTDATA_DIR/hspace-tiered.data > $T/capacity && sh -c ". $T/capacity && test \"\${HTS_TSPEC}\" = '131072,1048576,4=4 129984,1048320,4=2' && test \"\${HTS_ALLOC_INSTANCES}\" = 6"
+>>>=0
+
+# again, but with a policy containing two min/max specs pairs
+./test/hs/hspace --machine-readable -t $TESTDATA_DIR/hspace-tiered-dualspec.data > $T/capacity && sh -c ". $T/capacity && test \"\${HTS_TSPEC}\" = '131072,1048576,4=4 129984,1048320,4=2 65472,524288,2=2' && test \"\${HTS_ALLOC_INSTANCES}\" = 14"
+>>>2
+>>>=0
index 311a68f..52a9299 100755 (executable)
@@ -78,6 +78,9 @@ class TestCfgupgrade(unittest.TestCase):
   def _LoadConfig(self):
     return serializer.LoadJson(utils.ReadFile(self.config_path))
 
+  def _LoadTestDataConfig(self, filename):
+    return serializer.LoadJson(testutils.ReadTestData(filename))
+
   def _CreateValidConfigDir(self):
     utils.WriteFile(self.noded_cert_path, data="")
     utils.WriteFile(self.known_hosts_path, data="")
@@ -140,6 +143,10 @@ class TestCfgupgrade(unittest.TestCase):
     utils.WriteFile(self.config_path, data=serializer.DumpJson({}))
     self.assertRaises(Exception, _RunUpgrade, self.tmpdir, False, True)
 
+  def _TestUpgradeFromFile(self, filename, dry_run):
+    cfg = self._LoadTestDataConfig(filename)
+    self._TestUpgradeFromData(cfg, dry_run)
+
   def _TestSimpleUpgrade(self, from_version, dry_run,
                          file_storage_dir=None,
                          shared_file_storage_dir=None):
@@ -156,6 +163,11 @@ class TestCfgupgrade(unittest.TestCase):
       "instances": {},
       "nodegroups": {},
       }
+    self._TestUpgradeFromData(cfg, dry_run)
+
+  def _TestUpgradeFromData(self, cfg, dry_run):
+    assert "version" in cfg
+    from_version = cfg["version"]
     self._CreateValidConfigDir()
     utils.WriteFile(self.config_path, data=serializer.DumpJson(cfg))
 
@@ -347,25 +359,59 @@ class TestCfgupgrade(unittest.TestCase):
   def testUpgradeFrom_2_6(self):
     self._TestSimpleUpgrade(constants.BuildVersion(2, 6, 0), False)
 
+  def testUpgradeFrom_2_7(self):
+    self._TestSimpleUpgrade(constants.BuildVersion(2, 7, 0), False)
+
+  def testUpgradeFullConfigFrom_2_7(self):
+    self._TestUpgradeFromFile("cluster_config_2.7.json", False)
+
   def testUpgradeCurrent(self):
     self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
 
-  def testDowngrade(self):
-    self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
+  def _RunDowngradeUpgrade(self):
     oldconf = self._LoadConfig()
     _RunUpgrade(self.tmpdir, False, True, downgrade=True)
     _RunUpgrade(self.tmpdir, False, True)
     newconf = self._LoadConfig()
     self.assertEqual(oldconf, newconf)
 
-  def testDowngradeTwice(self):
+  def testDowngrade(self):
     self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
+    self._RunDowngradeUpgrade()
+
+  def testDowngradeFullConfig(self):
+    """Test for upgrade + downgrade combination."""
+    # This test can work only with the previous version of a configuration!
+    # For 2.7, downgrading returns the original file only if group policies
+    # don't override instance specs, so we need to use an ad-hoc configuration.
+    oldconfname = "cluster_config_downgraded_2.7.json"
+    self._TestUpgradeFromFile(oldconfname, False)
+    _RunUpgrade(self.tmpdir, False, True, downgrade=True)
+    oldconf = self._LoadTestDataConfig(oldconfname)
+    newconf = self._LoadConfig()
+    self.assertEqual(oldconf, newconf)
+
+  def testDowngradeFullConfigBackwardFrom_2_7(self):
+    """Test for upgrade + downgrade + upgrade combination."""
+    self._TestUpgradeFromFile("cluster_config_2.7.json", False)
+    self._RunDowngradeUpgrade()
+
+  def _RunDowngradeTwice(self):
+    """Make sure that downgrade is idempotent."""
     _RunUpgrade(self.tmpdir, False, True, downgrade=True)
     oldconf = self._LoadConfig()
     _RunUpgrade(self.tmpdir, False, True, downgrade=True)
     newconf = self._LoadConfig()
     self.assertEqual(oldconf, newconf)
 
+  def testDowngradeTwice(self):
+    self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
+    self._RunDowngradeTwice()
+
+  def testDowngradeTwiceFullConfigFrom_2_7(self):
+    self._TestUpgradeFromFile("cluster_config_2.7.json", False)
+    self._RunDowngradeTwice()
+
   def testUpgradeDryRunFrom_2_0(self):
     self._TestSimpleUpgrade(constants.BuildVersion(2, 0, 0), True)
 
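Note: the refactored helpers above all encode one invariant: downgrading an upgraded configuration and upgrading it again must reproduce the same data, and downgrading twice must be a no-op. A generic sketch of that invariant; upgrade and downgrade here are hypothetical callables standing in for running cfgupgrade, not the real test harness:

def check_roundtrip(cfg, upgrade, downgrade):
    up = upgrade(cfg)
    down = downgrade(up)
    assert upgrade(down) == up       # downgrade followed by upgrade restores the config
    assert downgrade(down) == down   # a second downgrade changes nothing

# trivial smoke test with identity transforms standing in for cfgupgrade
check_roundtrip({"version": 2070000}, lambda c: c, lambda c: c)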
index c38ad0e..855ff40 100755 (executable)
 
 """Script for unittesting the cli module"""
 
-import unittest
+import copy
+import testutils
 import time
+import unittest
+import yaml
 from cStringIO import StringIO
 
-import ganeti
-import testutils
-
 from ganeti import constants
 from ganeti import cli
 from ganeti import errors
@@ -71,20 +71,22 @@ class TestSplitKeyVal(unittest.TestCase):
   """Testing case for cli._SplitKeyVal"""
   DATA = "a=b,c,no_d,-e"
   RESULT = {"a": "b", "c": True, "d": False, "e": None}
+  RESULT_NOPREFIX = {"a": "b", "c": {}, "no_d": {}, "-e": {}}
 
   def testSplitKeyVal(self):
     """Test splitting"""
-    self.failUnlessEqual(cli._SplitKeyVal("option", self.DATA), self.RESULT)
+    self.failUnlessEqual(cli._SplitKeyVal("option", self.DATA, True),
+                         self.RESULT)
 
   def testDuplicateParam(self):
     """Test duplicate parameters"""
     for data in ("a=1,a=2", "a,no_a"):
       self.failUnlessRaises(ParameterError, cli._SplitKeyVal,
-                            "option", data)
+                            "option", data, True)
 
   def testEmptyData(self):
     """Test how we handle splitting an empty string"""
-    self.failUnlessEqual(cli._SplitKeyVal("option", ""), {})
+    self.failUnlessEqual(cli._SplitKeyVal("option", "", True), {})
 
 
 class TestIdentKeyVal(unittest.TestCase):
@@ -101,6 +103,7 @@ class TestIdentKeyVal(unittest.TestCase):
     self.assertEqual(cikv("no_bar"), ("bar", False))
     self.assertRaises(ParameterError, cikv, "no_bar:foo")
     self.assertRaises(ParameterError, cikv, "no_bar:foo=baz")
+    self.assertRaises(ParameterError, cikv, "bar:foo=baz,foo=baz")
     self.assertEqual(cikv("-foo"), ("foo", None))
     self.assertRaises(ParameterError, cikv, "-foo:a=c")
 
@@ -115,6 +118,82 @@ class TestIdentKeyVal(unittest.TestCase):
     for i in ["-:", "-"]:
       self.assertEqual(cikv(i), ("", None))
 
+  @staticmethod
+  def _csikv(value):
+    return cli._SplitIdentKeyVal("opt", value, False)
+
+  def testIdentKeyValNoPrefix(self):
+    """Test identkeyval without prefixes"""
+    test_cases = [
+      ("foo:bar", None),
+      ("foo:no_bar", None),
+      ("foo:bar=baz,bar=baz", None),
+      ("foo",
+       ("foo", {})),
+      ("foo:bar=baz",
+       ("foo", {"bar": "baz"})),
+      ("no_foo:-1=baz,no_op=3",
+       ("no_foo", {"-1": "baz", "no_op": "3"})),
+      ]
+    for (arg, res) in test_cases:
+      if res is None:
+        self.assertRaises(ParameterError, self._csikv, arg)
+      else:
+        self.assertEqual(self._csikv(arg), res)
+
+
+class TestMultilistIdentKeyVal(unittest.TestCase):
+  """Test for cli.check_multilist_ident_key_val()"""
+
+  @staticmethod
+  def _cmikv(value):
+    return cli.check_multilist_ident_key_val("option", "opt", value)
+
+  def testListIdentKeyVal(self):
+    test_cases = [
+      ("",
+       None),
+      ("foo", [
+        {"foo": {}}
+        ]),
+      ("foo:bar=baz", [
+        {"foo": {"bar": "baz"}}
+        ]),
+      ("foo:bar=baz/foo:bat=bad",
+       None),
+      ("foo:abc=42/bar:def=11", [
+        {"foo": {"abc": "42"},
+         "bar": {"def": "11"}}
+        ]),
+      ("foo:abc=42/bar:def=11,ghi=07", [
+        {"foo": {"abc": "42"},
+         "bar": {"def": "11", "ghi": "07"}}
+        ]),
+      ("foo:abc=42/bar:def=11//",
+       None),
+      ("foo:abc=42/bar:def=11,ghi=07//foobar", [
+        {"foo": {"abc": "42"},
+         "bar": {"def": "11", "ghi": "07"}},
+        {"foobar": {}}
+        ]),
+      ("foo:abc=42/bar:def=11,ghi=07//foobar:xyz=88", [
+        {"foo": {"abc": "42"},
+         "bar": {"def": "11", "ghi": "07"}},
+        {"foobar": {"xyz": "88"}}
+        ]),
+      ("foo:abc=42/bar:def=11,ghi=07//foobar:xyz=88/foo:uvw=314", [
+        {"foo": {"abc": "42"},
+         "bar": {"def": "11", "ghi": "07"}},
+        {"foobar": {"xyz": "88"},
+         "foo": {"uvw": "314"}}
+        ]),
+      ]
+    for (arg, res) in test_cases:
+      if res is None:
+        self.assertRaises(ParameterError, self._cmikv, arg)
+      else:
+        self.assertEqual(res, self._cmikv(arg))
+
 
 class TestToStream(unittest.TestCase):
   """Test the ToStream functions"""
@@ -1122,9 +1201,85 @@ class TestSerializeGenericInfo(unittest.TestCase):
     self._RunTest(dict(data), expected)
 
 
+class TestFormatPolicyInfo(unittest.TestCase):
+  """Test case for cli.FormatPolicyInfo.
+
+  These tests rely on cli._SerializeGenericInfo (tested elsewhere).
+
+  """
+  def setUp(self):
+    # Policies are big, and we want to see the difference in case of an error
+    self.maxDiff = None
+
+  def _RenameDictItem(self, parsed, old, new):
+    self.assertTrue(old in parsed)
+    self.assertTrue(new not in parsed)
+    parsed[new] = parsed[old]
+    del parsed[old]
+
+  def _TranslateParsedNames(self, parsed):
+    for (pretty, raw) in [
+      ("bounds specs", constants.ISPECS_MINMAX),
+      ("enabled disk templates", constants.IPOLICY_DTS)
+      ]:
+      self._RenameDictItem(parsed, pretty, raw)
+    for minmax in parsed[constants.ISPECS_MINMAX]:
+      for key in minmax:
+        keyparts = key.split("/", 1)
+        if len(keyparts) > 1:
+          self._RenameDictItem(minmax, key, keyparts[0])
+    self.assertTrue(constants.IPOLICY_DTS in parsed)
+    parsed[constants.IPOLICY_DTS] = yaml.load("[%s]" %
+                                              parsed[constants.IPOLICY_DTS])
+
+  @staticmethod
+  def _PrintAndParsePolicy(custom, effective, iscluster):
+    formatted = cli.FormatPolicyInfo(custom, effective, iscluster)
+    buf = StringIO()
+    cli._SerializeGenericInfo(buf, formatted, 0)
+    return yaml.load(buf.getvalue())
+
+  def _PrintAndCheckParsed(self, policy):
+    parsed = self._PrintAndParsePolicy(policy, NotImplemented, True)
+    self._TranslateParsedNames(parsed)
+    self.assertEqual(parsed, policy)
+
+  def _CompareClusterGroupItems(self, cluster, group, skip=None):
+    if isinstance(group, dict):
+      self.assertTrue(isinstance(cluster, dict))
+      if skip is None:
+        skip = frozenset()
+      self.assertEqual(frozenset(cluster.keys()).difference(skip),
+                       frozenset(group.keys()))
+      for key in group:
+        self._CompareClusterGroupItems(cluster[key], group[key])
+    elif isinstance(group, list):
+      self.assertTrue(isinstance(cluster, list))
+      self.assertEqual(len(cluster), len(group))
+      for (cval, gval) in zip(cluster, group):
+        self._CompareClusterGroupItems(cval, gval)
+    else:
+      self.assertTrue(isinstance(group, basestring))
+      self.assertEqual("default (%s)" % cluster, group)
+
+  def _TestClusterVsGroup(self, policy):
+    cluster = self._PrintAndParsePolicy(policy, NotImplemented, True)
+    group = self._PrintAndParsePolicy({}, policy, False)
+    self._CompareClusterGroupItems(cluster, group, ["std"])
+
+  def testWithDefaults(self):
+    self._PrintAndCheckParsed(constants.IPOLICY_DEFAULTS)
+    self._TestClusterVsGroup(constants.IPOLICY_DEFAULTS)
+
+
 class TestCreateIPolicyFromOpts(unittest.TestCase):
   """Test case for cli.CreateIPolicyFromOpts."""
-  def _RecursiveCheckMergedDicts(self, default_pol, diff_pol, merged_pol):
+  def setUp(self):
+    # Policies are big, and we want to see the difference in case of an error
+    self.maxDiff = None
+
+  def _RecursiveCheckMergedDicts(self, default_pol, diff_pol, merged_pol,
+                                 merge_minmax=False):
     self.assertTrue(type(default_pol) is dict)
     self.assertTrue(type(diff_pol) is dict)
     self.assertTrue(type(merged_pol) is dict)
@@ -1134,46 +1289,76 @@ class TestCreateIPolicyFromOpts(unittest.TestCase):
       if key in diff_pol:
         if type(val) is dict:
           self._RecursiveCheckMergedDicts(default_pol[key], diff_pol[key], val)
+        elif (merge_minmax and key == "minmax" and type(val) is list and
+              len(val) == 1):
+          self.assertEqual(len(default_pol[key]), 1)
+          self.assertEqual(len(diff_pol[key]), 1)
+          self._RecursiveCheckMergedDicts(default_pol[key][0],
+                                          diff_pol[key][0], val[0])
         else:
           self.assertEqual(val, diff_pol[key])
       else:
         self.assertEqual(val, default_pol[key])
 
   def testClusterPolicy(self):
-    exp_pol0 = {
-      constants.ISPECS_MINMAX: {
-        constants.ISPECS_MIN: {},
-        constants.ISPECS_MAX: {},
-        },
-      constants.ISPECS_STD: {},
-      }
+    pol0 = cli.CreateIPolicyFromOpts(
+      ispecs_mem_size={},
+      ispecs_cpu_count={},
+      ispecs_disk_count={},
+      ispecs_disk_size={},
+      ispecs_nic_count={},
+      ipolicy_disk_templates=None,
+      ipolicy_vcpu_ratio=None,
+      ipolicy_spindle_ratio=None,
+      fill_all=True
+      )
+    self.assertEqual(pol0, constants.IPOLICY_DEFAULTS)
+
     exp_pol1 = {
-      constants.ISPECS_MINMAX: {
-        constants.ISPECS_MIN: {
-          constants.ISPEC_CPU_COUNT: 2,
-          constants.ISPEC_DISK_COUNT: 1,
-          },
-        constants.ISPECS_MAX: {
-          constants.ISPEC_MEM_SIZE: 12*1024,
-          constants.ISPEC_DISK_COUNT: 2,
+      constants.ISPECS_MINMAX: [
+        {
+          constants.ISPECS_MIN: {
+            constants.ISPEC_CPU_COUNT: 2,
+            constants.ISPEC_DISK_COUNT: 1,
+            },
+          constants.ISPECS_MAX: {
+            constants.ISPEC_MEM_SIZE: 12*1024,
+            constants.ISPEC_DISK_COUNT: 2,
+            },
           },
-        },
+        ],
       constants.ISPECS_STD: {
         constants.ISPEC_CPU_COUNT: 2,
         constants.ISPEC_DISK_COUNT: 2,
         },
       constants.IPOLICY_VCPU_RATIO: 3.1,
       }
+    pol1 = cli.CreateIPolicyFromOpts(
+      ispecs_mem_size={"max": "12g"},
+      ispecs_cpu_count={"min": 2, "std": 2},
+      ispecs_disk_count={"min": 1, "max": 2, "std": 2},
+      ispecs_disk_size={},
+      ispecs_nic_count={},
+      ipolicy_disk_templates=None,
+      ipolicy_vcpu_ratio=3.1,
+      ipolicy_spindle_ratio=None,
+      fill_all=True
+      )
+    self._RecursiveCheckMergedDicts(constants.IPOLICY_DEFAULTS,
+                                    exp_pol1, pol1, merge_minmax=True)
+
     exp_pol2 = {
-      constants.ISPECS_MINMAX: {
-        constants.ISPECS_MIN: {
-          constants.ISPEC_DISK_SIZE: 512,
-          constants.ISPEC_NIC_COUNT: 2,
-          },
-        constants.ISPECS_MAX: {
-          constants.ISPEC_NIC_COUNT: 3,
+      constants.ISPECS_MINMAX: [
+        {
+          constants.ISPECS_MIN: {
+            constants.ISPEC_DISK_SIZE: 512,
+            constants.ISPEC_NIC_COUNT: 2,
+            },
+          constants.ISPECS_MAX: {
+            constants.ISPEC_NIC_COUNT: 3,
+            },
           },
-        },
+        ],
       constants.ISPECS_STD: {
         constants.ISPEC_CPU_COUNT: 2,
         constants.ISPEC_NIC_COUNT: 3,
@@ -1181,96 +1366,369 @@ class TestCreateIPolicyFromOpts(unittest.TestCase):
       constants.IPOLICY_SPINDLE_RATIO: 1.3,
       constants.IPOLICY_DTS: ["templates"],
       }
-    for fillall in [False, True]:
-      pol0 = cli.CreateIPolicyFromOpts(
-        ispecs_mem_size={},
-        ispecs_cpu_count={},
-        ispecs_disk_count={},
-        ispecs_disk_size={},
-        ispecs_nic_count={},
+    pol2 = cli.CreateIPolicyFromOpts(
+      ispecs_mem_size={},
+      ispecs_cpu_count={"std": 2},
+      ispecs_disk_count={},
+      ispecs_disk_size={"min": "0.5g"},
+      ispecs_nic_count={"min": 2, "max": 3, "std": 3},
+      ipolicy_disk_templates=["templates"],
+      ipolicy_vcpu_ratio=None,
+      ipolicy_spindle_ratio=1.3,
+      fill_all=True
+      )
+    self._RecursiveCheckMergedDicts(constants.IPOLICY_DEFAULTS,
+                                      exp_pol2, pol2, merge_minmax=True)
+
+    for fill_all in [False, True]:
+      exp_pol3 = {
+        constants.ISPECS_STD: {
+          constants.ISPEC_CPU_COUNT: 2,
+          constants.ISPEC_NIC_COUNT: 3,
+          },
+        }
+      pol3 = cli.CreateIPolicyFromOpts(
+        std_ispecs={
+          constants.ISPEC_CPU_COUNT: "2",
+          constants.ISPEC_NIC_COUNT: "3",
+          },
         ipolicy_disk_templates=None,
         ipolicy_vcpu_ratio=None,
         ipolicy_spindle_ratio=None,
-        fill_all=fillall
-        )
-      if fillall:
-        self.assertEqual(pol0, constants.IPOLICY_DEFAULTS)
-      else:
-        self.assertEqual(pol0, exp_pol0)
-      pol1 = cli.CreateIPolicyFromOpts(
-        ispecs_mem_size={"max": "12g"},
-        ispecs_cpu_count={"min": 2, "std": 2},
-        ispecs_disk_count={"min": 1, "max": 2, "std": 2},
-        ispecs_disk_size={},
-        ispecs_nic_count={},
-        ipolicy_disk_templates=None,
-        ipolicy_vcpu_ratio=3.1,
-        ipolicy_spindle_ratio=None,
-        fill_all=fillall
+        fill_all=fill_all
         )
-      if fillall:
+      if fill_all:
         self._RecursiveCheckMergedDicts(constants.IPOLICY_DEFAULTS,
-                                        exp_pol1, pol1)
+                                        exp_pol3, pol3, merge_minmax=True)
       else:
-        self.assertEqual(pol1, exp_pol1)
-      pol2 = cli.CreateIPolicyFromOpts(
-        ispecs_mem_size={},
-        ispecs_cpu_count={"std": 2},
-        ispecs_disk_count={},
-        ispecs_disk_size={"min": "0.5g"},
-        ispecs_nic_count={"min": 2, "max": 3, "std": 3},
-        ipolicy_disk_templates=["templates"],
-        ipolicy_vcpu_ratio=None,
-        ipolicy_spindle_ratio=1.3,
-        fill_all=fillall
-        )
-      if fillall:
-        self._RecursiveCheckMergedDicts(constants.IPOLICY_DEFAULTS,
-                                        exp_pol2, pol2)
+        self.assertEqual(pol3, exp_pol3)
+
+  def testPartialPolicy(self):
+    exp_pol0 = objects.MakeEmptyIPolicy()
+    pol0 = cli.CreateIPolicyFromOpts(
+      minmax_ispecs=None,
+      std_ispecs=None,
+      ipolicy_disk_templates=None,
+      ipolicy_vcpu_ratio=None,
+      ipolicy_spindle_ratio=None,
+      fill_all=False
+      )
+    self.assertEqual(pol0, exp_pol0)
+
+    exp_pol1 = {
+      constants.IPOLICY_VCPU_RATIO: 3.1,
+      }
+    pol1 = cli.CreateIPolicyFromOpts(
+      minmax_ispecs=None,
+      std_ispecs=None,
+      ipolicy_disk_templates=None,
+      ipolicy_vcpu_ratio=3.1,
+      ipolicy_spindle_ratio=None,
+      fill_all=False
+      )
+    self.assertEqual(pol1, exp_pol1)
+
+    exp_pol2 = {
+      constants.IPOLICY_SPINDLE_RATIO: 1.3,
+      constants.IPOLICY_DTS: ["templates"],
+      }
+    pol2 = cli.CreateIPolicyFromOpts(
+      minmax_ispecs=None,
+      std_ispecs=None,
+      ipolicy_disk_templates=["templates"],
+      ipolicy_vcpu_ratio=None,
+      ipolicy_spindle_ratio=1.3,
+      fill_all=False
+      )
+    self.assertEqual(pol2, exp_pol2)
+
+  def _TestInvalidISpecs(self, minmax_ispecs, std_ispecs, fail=True):
+    for fill_all in [False, True]:
+      if fail:
+        self.assertRaises((errors.OpPrereqError,
+                           errors.UnitParseError,
+                           errors.TypeEnforcementError),
+                          cli.CreateIPolicyFromOpts,
+                          minmax_ispecs=minmax_ispecs,
+                          std_ispecs=std_ispecs,
+                          fill_all=fill_all)
       else:
-        self.assertEqual(pol2, exp_pol2)
+        cli.CreateIPolicyFromOpts(minmax_ispecs=minmax_ispecs,
+                                  std_ispecs=std_ispecs,
+                                  fill_all=fill_all)
 
   def testInvalidPolicies(self):
-    self.assertRaises(errors.TypeEnforcementError, cli.CreateIPolicyFromOpts,
-                      ispecs_mem_size={}, ispecs_cpu_count={},
-                      ispecs_disk_count={}, ispecs_disk_size={"std": 1},
-                      ispecs_nic_count={}, ipolicy_disk_templates=None,
-                      ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None,
-                      group_ipolicy=True)
+    self.assertRaises(AssertionError, cli.CreateIPolicyFromOpts,
+                      std_ispecs={constants.ISPEC_MEM_SIZE: 1024},
+                      ipolicy_disk_templates=None, ipolicy_vcpu_ratio=None,
+                      ipolicy_spindle_ratio=None, group_ipolicy=True)
     self.assertRaises(errors.OpPrereqError, cli.CreateIPolicyFromOpts,
                       ispecs_mem_size={"wrong": "x"}, ispecs_cpu_count={},
                       ispecs_disk_count={}, ispecs_disk_size={},
                       ispecs_nic_count={}, ipolicy_disk_templates=None,
-                      ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None)
+                      ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None,
+                      fill_all=True)
     self.assertRaises(errors.TypeEnforcementError, cli.CreateIPolicyFromOpts,
                       ispecs_mem_size={}, ispecs_cpu_count={"min": "default"},
                       ispecs_disk_count={}, ispecs_disk_size={},
                       ispecs_nic_count={}, ipolicy_disk_templates=None,
-                      ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None)
+                      ipolicy_vcpu_ratio=None, ipolicy_spindle_ratio=None,
+                      fill_all=True)
+
+    good_mmspecs = [
+      constants.ISPECS_MINMAX_DEFAULTS,
+      constants.ISPECS_MINMAX_DEFAULTS,
+      ]
+    self._TestInvalidISpecs(good_mmspecs, None, fail=False)
+    broken_mmspecs = copy.deepcopy(good_mmspecs)
+    for minmaxpair in broken_mmspecs:
+      for key in constants.ISPECS_MINMAX_KEYS:
+        for par in constants.ISPECS_PARAMETERS:
+          old = minmaxpair[key][par]
+          del minmaxpair[key][par]
+          self._TestInvalidISpecs(broken_mmspecs, None)
+          minmaxpair[key][par] = "invalid"
+          self._TestInvalidISpecs(broken_mmspecs, None)
+          minmaxpair[key][par] = old
+        minmaxpair[key]["invalid_key"] = None
+        self._TestInvalidISpecs(broken_mmspecs, None)
+        del minmaxpair[key]["invalid_key"]
+      minmaxpair["invalid_key"] = None
+      self._TestInvalidISpecs(broken_mmspecs, None)
+      del minmaxpair["invalid_key"]
+      assert broken_mmspecs == good_mmspecs
+
+    good_stdspecs = constants.IPOLICY_DEFAULTS[constants.ISPECS_STD]
+    self._TestInvalidISpecs(None, good_stdspecs, fail=False)
+    broken_stdspecs = copy.deepcopy(good_stdspecs)
+    for par in constants.ISPECS_PARAMETERS:
+      old = broken_stdspecs[par]
+      broken_stdspecs[par] = "invalid"
+      self._TestInvalidISpecs(None, broken_stdspecs)
+      broken_stdspecs[par] = old
+    broken_stdspecs["invalid_key"] = None
+    self._TestInvalidISpecs(None, broken_stdspecs)
+    del broken_stdspecs["invalid_key"]
+    assert broken_stdspecs == good_stdspecs
 
   def testAllowedValues(self):
     allowedv = "blah"
     exp_pol1 = {
-      constants.ISPECS_MINMAX: {
+      constants.ISPECS_MINMAX: allowedv,
+      constants.IPOLICY_DTS: allowedv,
+      constants.IPOLICY_VCPU_RATIO: allowedv,
+      constants.IPOLICY_SPINDLE_RATIO: allowedv,
+      }
+    pol1 = cli.CreateIPolicyFromOpts(minmax_ispecs=[{allowedv: {}}],
+                                     std_ispecs=None,
+                                     ipolicy_disk_templates=allowedv,
+                                     ipolicy_vcpu_ratio=allowedv,
+                                     ipolicy_spindle_ratio=allowedv,
+                                     allowed_values=[allowedv])
+    self.assertEqual(pol1, exp_pol1)
+
+  @staticmethod
+  def _ConvertSpecToStrings(spec):
+    ret = {}
+    for (par, val) in spec.items():
+      ret[par] = str(val)
+    return ret
+
+  def _CheckNewStyleSpecsCall(self, exp_ipolicy, minmax_ispecs, std_ispecs,
+                              group_ipolicy, fill_all):
+    ipolicy = cli.CreateIPolicyFromOpts(minmax_ispecs=minmax_ispecs,
+                                        std_ispecs=std_ispecs,
+                                        group_ipolicy=group_ipolicy,
+                                        fill_all=fill_all)
+    self.assertEqual(ipolicy, exp_ipolicy)
+
+  def _TestFullISpecsInner(self, skel_exp_ipol, exp_minmax, exp_std,
+                           group_ipolicy, fill_all):
+    exp_ipol = skel_exp_ipol.copy()
+    if exp_minmax is not None:
+      minmax_ispecs = []
+      for exp_mm_pair in exp_minmax:
+        mmpair = {}
+        for (key, spec) in exp_mm_pair.items():
+          mmpair[key] = self._ConvertSpecToStrings(spec)
+        minmax_ispecs.append(mmpair)
+      exp_ipol[constants.ISPECS_MINMAX] = exp_minmax
+    else:
+      minmax_ispecs = None
+    if exp_std is not None:
+      std_ispecs = self._ConvertSpecToStrings(exp_std)
+      exp_ipol[constants.ISPECS_STD] = exp_std
+    else:
+      std_ispecs = None
+
+    self._CheckNewStyleSpecsCall(exp_ipol, minmax_ispecs, std_ispecs,
+                                 group_ipolicy, fill_all)
+    if minmax_ispecs:
+      for mmpair in minmax_ispecs:
+        for (key, spec) in mmpair.items():
+          for par in [constants.ISPEC_MEM_SIZE, constants.ISPEC_DISK_SIZE]:
+            if par in spec:
+              spec[par] += "m"
+              self._CheckNewStyleSpecsCall(exp_ipol, minmax_ispecs, std_ispecs,
+                                           group_ipolicy, fill_all)
+    if std_ispecs:
+      for par in [constants.ISPEC_MEM_SIZE, constants.ISPEC_DISK_SIZE]:
+        if par in std_ispecs:
+          std_ispecs[par] += "m"
+          self._CheckNewStyleSpecsCall(exp_ipol, minmax_ispecs, std_ispecs,
+                                       group_ipolicy, fill_all)
+
+  def testFullISpecs(self):
+    exp_minmax1 = [
+      {
         constants.ISPECS_MIN: {
-          constants.ISPEC_CPU_COUNT: allowedv,
+          constants.ISPEC_MEM_SIZE: 512,
+          constants.ISPEC_CPU_COUNT: 2,
+          constants.ISPEC_DISK_COUNT: 2,
+          constants.ISPEC_DISK_SIZE: 512,
+          constants.ISPEC_NIC_COUNT: 2,
+          constants.ISPEC_SPINDLE_USE: 2,
           },
         constants.ISPECS_MAX: {
+          constants.ISPEC_MEM_SIZE: 768*1024,
+          constants.ISPEC_CPU_COUNT: 7,
+          constants.ISPEC_DISK_COUNT: 6,
+          constants.ISPEC_DISK_SIZE: 2048*1024,
+          constants.ISPEC_NIC_COUNT: 3,
+          constants.ISPEC_SPINDLE_USE: 3,
           },
         },
-      constants.ISPECS_STD: {
+      ]
+    exp_minmax2 = [
+      {
+        constants.ISPECS_MIN: {
+          constants.ISPEC_MEM_SIZE: 512,
+          constants.ISPEC_CPU_COUNT: 2,
+          constants.ISPEC_DISK_COUNT: 2,
+          constants.ISPEC_DISK_SIZE: 512,
+          constants.ISPEC_NIC_COUNT: 2,
+          constants.ISPEC_SPINDLE_USE: 2,
+          },
+        constants.ISPECS_MAX: {
+          constants.ISPEC_MEM_SIZE: 768*1024,
+          constants.ISPEC_CPU_COUNT: 7,
+          constants.ISPEC_DISK_COUNT: 6,
+          constants.ISPEC_DISK_SIZE: 2048*1024,
+          constants.ISPEC_NIC_COUNT: 3,
+          constants.ISPEC_SPINDLE_USE: 3,
+          },
+        },
+      {
+        constants.ISPECS_MIN: {
+          constants.ISPEC_MEM_SIZE: 1024*1024,
+          constants.ISPEC_CPU_COUNT: 3,
+          constants.ISPEC_DISK_COUNT: 3,
+          constants.ISPEC_DISK_SIZE: 256,
+          constants.ISPEC_NIC_COUNT: 4,
+          constants.ISPEC_SPINDLE_USE: 5,
+          },
+        constants.ISPECS_MAX: {
+          constants.ISPEC_MEM_SIZE: 2048*1024,
+          constants.ISPEC_CPU_COUNT: 5,
+          constants.ISPEC_DISK_COUNT: 5,
+          constants.ISPEC_DISK_SIZE: 1024*1024,
+          constants.ISPEC_NIC_COUNT: 5,
+          constants.ISPEC_SPINDLE_USE: 7,
+          },
         },
+      ]
+    exp_std1 = {
+      constants.ISPEC_MEM_SIZE: 768*1024,
+      constants.ISPEC_CPU_COUNT: 7,
+      constants.ISPEC_DISK_COUNT: 6,
+      constants.ISPEC_DISK_SIZE: 2048*1024,
+      constants.ISPEC_NIC_COUNT: 3,
+      constants.ISPEC_SPINDLE_USE: 1,
       }
-    pol1 = cli.CreateIPolicyFromOpts(ispecs_mem_size={},
-                                     ispecs_cpu_count={"min": allowedv},
-                                     ispecs_disk_count={},
-                                     ispecs_disk_size={},
-                                     ispecs_nic_count={},
-                                     ipolicy_disk_templates=None,
-                                     ipolicy_vcpu_ratio=None,
-                                     ipolicy_spindle_ratio=None,
-                                     allowed_values=[allowedv])
-    self.assertEqual(pol1, exp_pol1)
+    for fill_all in [False, True]:
+      if fill_all:
+        skel_ipolicy = constants.IPOLICY_DEFAULTS
+      else:
+        skel_ipolicy = {}
+      self._TestFullISpecsInner(skel_ipolicy, None, exp_std1,
+                                False, fill_all)
+      for exp_minmax in [exp_minmax1, exp_minmax2]:
+        self._TestFullISpecsInner(skel_ipolicy, exp_minmax, exp_std1,
+                                  False, fill_all)
+        self._TestFullISpecsInner(skel_ipolicy, exp_minmax, None,
+                                  False, fill_all)
+
+
+class TestPrintIPolicyCommand(unittest.TestCase):
+  """Test case for cli.PrintIPolicyCommand"""
+  _SPECS1 = {
+    "par1": 42,
+    "par2": "xyz",
+    }
+  _SPECS1_STR = "par1=42,par2=xyz"
+  _SPECS2 = {
+    "param": 10,
+    "another_param": 101,
+    }
+  _SPECS2_STR = "another_param=101,param=10"
+  _SPECS3 = {
+    "par1": 1024,
+    "param": "abc",
+    }
+  _SPECS3_STR = "par1=1024,param=abc"
+
+  def _CheckPrintIPolicyCommand(self, ipolicy, isgroup, expected):
+    buf = StringIO()
+    cli.PrintIPolicyCommand(buf, ipolicy, isgroup)
+    self.assertEqual(buf.getvalue(), expected)
+
+  def testIgnoreStdForGroup(self):
+    self._CheckPrintIPolicyCommand({"std": self._SPECS1}, True, "")
+
+  def testIgnoreEmpty(self):
+    policies = [
+      {},
+      {"std": {}},
+      {"minmax": []},
+      {"minmax": [{}]},
+      {"minmax": [{
+        "min": {},
+        "max": {},
+        }]},
+      {"minmax": [{
+        "min": self._SPECS1,
+        "max": {},
+        }]},
+      ]
+    for pol in policies:
+      self._CheckPrintIPolicyCommand(pol, False, "")
+
+  def testFullPolicies(self):
+    cases = [
+      ({"std": self._SPECS1},
+       " %s %s" % (cli.IPOLICY_STD_SPECS_STR, self._SPECS1_STR)),
+      ({"minmax": [{
+        "min": self._SPECS1,
+        "max": self._SPECS2,
+        }]},
+       " %s min:%s/max:%s" % (cli.IPOLICY_BOUNDS_SPECS_STR,
+                              self._SPECS1_STR, self._SPECS2_STR)),
+      ({"minmax": [
+        {
+          "min": self._SPECS1,
+          "max": self._SPECS2,
+          },
+        {
+          "min": self._SPECS2,
+          "max": self._SPECS3,
+          },
+        ]},
+       " %s min:%s/max:%s//min:%s/max:%s" %
+       (cli.IPOLICY_BOUNDS_SPECS_STR, self._SPECS1_STR, self._SPECS2_STR,
+        self._SPECS2_STR, self._SPECS3_STR)),
+      ]
+    for (pol, exp) in cases:
+      self._CheckPrintIPolicyCommand(pol, False, exp)
 
 
 if __name__ == "__main__":
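For orientation, a minimal sketch (illustration only, not part of the patch; the values are invented) of how the new-style options exercised above map to a policy. Note that the tests require every min/max pair to carry all spec parameters, and that memory-size/disk-size values may carry a unit suffix such as "m":

from ganeti import cli, constants

min_spec = {constants.ISPEC_MEM_SIZE: "128", constants.ISPEC_CPU_COUNT: "1",
            constants.ISPEC_DISK_COUNT: "1", constants.ISPEC_DISK_SIZE: "512m",
            constants.ISPEC_NIC_COUNT: "1", constants.ISPEC_SPINDLE_USE: "1"}
max_spec = {constants.ISPEC_MEM_SIZE: "4096", constants.ISPEC_CPU_COUNT: "8",
            constants.ISPEC_DISK_COUNT: "4", constants.ISPEC_DISK_SIZE: "16384m",
            constants.ISPEC_NIC_COUNT: "3", constants.ISPEC_SPINDLE_USE: "4"}
ipolicy = cli.CreateIPolicyFromOpts(
  minmax_ispecs=[{constants.ISPECS_MIN: min_spec,
                  constants.ISPECS_MAX: max_spec}],
  std_ispecs=None, group_ipolicy=False, fill_all=True)
# ipolicy[constants.ISPECS_MINMAX] is now a list of min/max pairs (values
# parsed to integers); with fill_all=True the remaining policy fields are
# completed from constants.IPOLICY_DEFAULTS.
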
index 5603e4a..bc4e7ee 100755 (executable)
@@ -673,7 +673,7 @@ class TestComputeIPolicySpecViolation(unittest.TestCase):
   # Minimal policy accepted by _ComputeIPolicySpecViolation()
   _MICRO_IPOL = {
     constants.IPOLICY_DTS: [constants.DT_PLAIN, constants.DT_DISKLESS],
-    constants.ISPECS_MINMAX: NotImplemented,
+    constants.ISPECS_MINMAX: [NotImplemented],
     }
 
   def test(self):
@@ -719,6 +719,79 @@ class TestComputeIPolicySpecViolation(unittest.TestCase):
     self.assertEqual(ret, ["foo", "bar"])
     self.assertFalse(spec.spec)
 
+  def testWithIPolicy(self):
+    mem_size = 2048
+    cpu_count = 2
+    disk_count = 1
+    disk_sizes = [512]
+    nic_count = 1
+    spindle_use = 4
+    disk_template = "mytemplate"
+    ispec = {
+      constants.ISPEC_MEM_SIZE: mem_size,
+      constants.ISPEC_CPU_COUNT: cpu_count,
+      constants.ISPEC_DISK_COUNT: disk_count,
+      constants.ISPEC_DISK_SIZE: disk_sizes[0],
+      constants.ISPEC_NIC_COUNT: nic_count,
+      constants.ISPEC_SPINDLE_USE: spindle_use,
+      }
+    ipolicy1 = {
+      constants.ISPECS_MINMAX: [{
+        constants.ISPECS_MIN: ispec,
+        constants.ISPECS_MAX: ispec,
+        }],
+      constants.IPOLICY_DTS: [disk_template],
+      }
+    ispec_copy = copy.deepcopy(ispec)
+    ipolicy2 = {
+      constants.ISPECS_MINMAX: [
+        {
+          constants.ISPECS_MIN: ispec_copy,
+          constants.ISPECS_MAX: ispec_copy,
+          },
+        {
+          constants.ISPECS_MIN: ispec,
+          constants.ISPECS_MAX: ispec,
+          },
+        ],
+      constants.IPOLICY_DTS: [disk_template],
+      }
+    ipolicy3 = {
+      constants.ISPECS_MINMAX: [
+        {
+          constants.ISPECS_MIN: ispec,
+          constants.ISPECS_MAX: ispec,
+          },
+        {
+          constants.ISPECS_MIN: ispec_copy,
+          constants.ISPECS_MAX: ispec_copy,
+          },
+        ],
+      constants.IPOLICY_DTS: [disk_template],
+      }
+    def AssertComputeViolation(ipolicy, violations):
+      ret = cmdlib._ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count,
+                                                disk_count, nic_count,
+                                                disk_sizes, spindle_use,
+                                                disk_template)
+      self.assertEqual(len(ret), violations)
+
+    AssertComputeViolation(ipolicy1, 0)
+    AssertComputeViolation(ipolicy2, 0)
+    AssertComputeViolation(ipolicy3, 0)
+    for par in constants.ISPECS_PARAMETERS:
+      ispec[par] += 1
+      AssertComputeViolation(ipolicy1, 1)
+      AssertComputeViolation(ipolicy2, 0)
+      AssertComputeViolation(ipolicy3, 0)
+      ispec[par] -= 2
+      AssertComputeViolation(ipolicy1, 1)
+      AssertComputeViolation(ipolicy2, 0)
+      AssertComputeViolation(ipolicy3, 0)
+      ispec[par] += 1 # Restore
+    ipolicy1[constants.IPOLICY_DTS] = ["another_template"]
+    AssertComputeViolation(ipolicy1, 1)
+
 
 class _StubComputeIPolicySpecViolation:
   def __init__(self, mem_size, cpu_count, disk_count, nic_count, disk_sizes,
@@ -1733,81 +1806,101 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
   """Tests for cmdlib._GetUpdatedIPolicy()"""
   _OLD_CLUSTER_POLICY = {
     constants.IPOLICY_VCPU_RATIO: 1.5,
-    constants.ISPECS_MINMAX: {
-      constants.ISPECS_MIN: {
-        constants.ISPEC_MEM_SIZE: 20,
-        constants.ISPEC_CPU_COUNT: 2,
+    constants.ISPECS_MINMAX: [
+      {
+        constants.ISPECS_MIN: {
+          constants.ISPEC_MEM_SIZE: 32768,
+          constants.ISPEC_CPU_COUNT: 8,
+          constants.ISPEC_DISK_COUNT: 1,
+          constants.ISPEC_DISK_SIZE: 1024,
+          constants.ISPEC_NIC_COUNT: 1,
+          constants.ISPEC_SPINDLE_USE: 1,
+          },
+        constants.ISPECS_MAX: {
+          constants.ISPEC_MEM_SIZE: 65536,
+          constants.ISPEC_CPU_COUNT: 10,
+          constants.ISPEC_DISK_COUNT: 5,
+          constants.ISPEC_DISK_SIZE: 1024 * 1024,
+          constants.ISPEC_NIC_COUNT: 3,
+          constants.ISPEC_SPINDLE_USE: 12,
+          },
         },
-      constants.ISPECS_MAX: {},
-      },
-    constants.ISPECS_STD: {},
+      constants.ISPECS_MINMAX_DEFAULTS,
+      ],
+    constants.ISPECS_STD: constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
     }
   _OLD_GROUP_POLICY = {
     constants.IPOLICY_SPINDLE_RATIO: 2.5,
-    constants.ISPECS_MINMAX: {
+    constants.ISPECS_MINMAX: [{
       constants.ISPECS_MIN: {
-        constants.ISPEC_DISK_SIZE: 20,
-        constants.ISPEC_NIC_COUNT: 2,
+        constants.ISPEC_MEM_SIZE: 128,
+        constants.ISPEC_CPU_COUNT: 1,
+        constants.ISPEC_DISK_COUNT: 1,
+        constants.ISPEC_DISK_SIZE: 1024,
+        constants.ISPEC_NIC_COUNT: 1,
+        constants.ISPEC_SPINDLE_USE: 1,
         },
-      constants.ISPECS_MAX: {},
-      },
+      constants.ISPECS_MAX: {
+        constants.ISPEC_MEM_SIZE: 32768,
+        constants.ISPEC_CPU_COUNT: 8,
+        constants.ISPEC_DISK_COUNT: 5,
+        constants.ISPEC_DISK_SIZE: 1024 * 1024,
+        constants.ISPEC_NIC_COUNT: 3,
+        constants.ISPEC_SPINDLE_USE: 12,
+        },
+      }],
     }
 
   def _TestSetSpecs(self, old_policy, isgroup):
-    ispec_key = constants.ISPECS_MIN
-    diff_ispec = {
-      constants.ISPEC_MEM_SIZE: 50,
-      constants.ISPEC_DISK_SIZE: 30,
-      }
-    diff_policy = {
-      constants.ISPECS_MINMAX: {
-        ispec_key: diff_ispec,
+    diff_minmax = [{
+      constants.ISPECS_MIN: {
+        constants.ISPEC_MEM_SIZE: 64,
+        constants.ISPEC_CPU_COUNT: 1,
+        constants.ISPEC_DISK_COUNT: 2,
+        constants.ISPEC_DISK_SIZE: 64,
+        constants.ISPEC_NIC_COUNT: 1,
+        constants.ISPEC_SPINDLE_USE: 1,
         },
+      constants.ISPECS_MAX: {
+        constants.ISPEC_MEM_SIZE: 16384,
+        constants.ISPEC_CPU_COUNT: 10,
+        constants.ISPEC_DISK_COUNT: 12,
+        constants.ISPEC_DISK_SIZE: 1024,
+        constants.ISPEC_NIC_COUNT: 9,
+        constants.ISPEC_SPINDLE_USE: 18,
+        },
+      }]
+    diff_std = {
+      constants.ISPEC_DISK_COUNT: 10,
+      constants.ISPEC_DISK_SIZE: 512,
+      }
+    diff_policy = {
+      constants.ISPECS_MINMAX: diff_minmax
       }
     if not isgroup:
-      diff_std = {
-        constants.ISPEC_CPU_COUNT: 3,
-        constants.ISPEC_DISK_COUNT: 3,
-        }
       diff_policy[constants.ISPECS_STD] = diff_std
     new_policy = cmdlib._GetUpdatedIPolicy(old_policy, diff_policy,
                                            group_policy=isgroup)
 
     self.assertTrue(constants.ISPECS_MINMAX in new_policy)
-    new_ispec = new_policy[constants.ISPECS_MINMAX][ispec_key]
-    for key in diff_ispec:
-      self.assertTrue(key in new_ispec)
-      self.assertEqual(new_ispec[key], diff_ispec[key])
+    self.assertEqual(new_policy[constants.ISPECS_MINMAX], diff_minmax)
     for key in old_policy:
       if not key in diff_policy:
         self.assertTrue(key in new_policy)
         self.assertEqual(new_policy[key], old_policy[key])
 
-    if constants.ISPECS_MINMAX in old_policy:
-      old_minmax = old_policy[constants.ISPECS_MINMAX]
-      for key in old_minmax:
-        if key != ispec_key:
-          self.assertTrue(key in new_policy[constants.ISPECS_MINMAX])
-          self.assertEqual(new_policy[constants.ISPECS_MINMAX][key],
-                           old_minmax[key])
-      old_ispec = old_policy[constants.ISPECS_MINMAX][ispec_key]
-      for key in old_ispec:
-        if not key in diff_ispec:
-          self.assertTrue(key in new_ispec)
-          self.assertEqual(new_ispec[key], old_ispec[key])
-
     if not isgroup:
       new_std = new_policy[constants.ISPECS_STD]
       for key in diff_std:
         self.assertTrue(key in new_std)
         self.assertEqual(new_std[key], diff_std[key])
+      old_std = old_policy.get(constants.ISPECS_STD, {})
+      for key in old_std:
+        self.assertTrue(key in new_std)
+        if key not in diff_std:
+          self.assertEqual(new_std[key], old_std[key])
 
-
-  def _TestSet(self, old_policy, isgroup):
-    diff_policy = {
-      constants.IPOLICY_VCPU_RATIO: 3,
-      constants.IPOLICY_SPINDLE_RATIO: 1.9,
-      }
+  def _TestSet(self, old_policy, diff_policy, isgroup):
     new_policy = cmdlib._GetUpdatedIPolicy(old_policy, diff_policy,
                                            group_policy=isgroup)
     for key in diff_policy:
@@ -1819,9 +1912,15 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
         self.assertEqual(new_policy[key], old_policy[key])
 
   def testSet(self):
-    self._TestSet(self._OLD_GROUP_POLICY, True)
+    diff_policy = {
+      constants.IPOLICY_VCPU_RATIO: 3,
+      constants.IPOLICY_DTS: [constants.DT_FILE],
+      }
+    self._TestSet(self._OLD_GROUP_POLICY, diff_policy, True)
     self._TestSetSpecs(self._OLD_GROUP_POLICY, True)
-    self._TestSet(self._OLD_CLUSTER_POLICY, False)
+    self._TestSet({}, diff_policy, True)
+    self._TestSetSpecs({}, True)
+    self._TestSet(self._OLD_CLUSTER_POLICY, diff_policy, False)
     self._TestSetSpecs(self._OLD_CLUSTER_POLICY, False)
 
   def testUnset(self):
@@ -1838,31 +1937,73 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
         self.assertTrue(key in new_policy)
         self.assertEqual(new_policy[key], old_policy[key])
 
+    self.assertRaises(errors.OpPrereqError, cmdlib._GetUpdatedIPolicy,
+                      old_policy, diff_policy, group_policy=False)
+
+  def testUnsetEmpty(self):
+    old_policy = {}
+    for key in constants.IPOLICY_ALL_KEYS:
+      diff_policy = {
+        key: constants.VALUE_DEFAULT,
+        }
+      new_policy = cmdlib._GetUpdatedIPolicy(old_policy, diff_policy,
+                                             group_policy=True)
+      self.assertEqual(new_policy, old_policy)
+
   def _TestInvalidKeys(self, old_policy, isgroup):
+    INVALID_KEY = "this_key_shouldnt_be_allowed"
     INVALID_DICT = {
-      "this_key_shouldnt_be_allowed": 3,
+      INVALID_KEY: 3,
       }
     invalid_policy = INVALID_DICT
     self.assertRaises(errors.OpPrereqError, cmdlib._GetUpdatedIPolicy,
                       old_policy, invalid_policy, group_policy=isgroup)
     invalid_ispecs = {
-      constants.ISPECS_MINMAX: INVALID_DICT,
+      constants.ISPECS_MINMAX: [INVALID_DICT],
       }
-    self.assertRaises(errors.OpPrereqError, cmdlib._GetUpdatedIPolicy,
+    self.assertRaises(errors.TypeEnforcementError, cmdlib._GetUpdatedIPolicy,
                       old_policy, invalid_ispecs, group_policy=isgroup)
-    for key in constants.ISPECS_MINMAX_KEYS:
-      invalid_ispec = {
-        constants.ISPECS_MINMAX: {
-          key: INVALID_DICT,
-          },
+    if isgroup:
+      invalid_for_group = {
+        constants.ISPECS_STD: constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
         }
-      self.assertRaises(errors.TypeEnforcementError, cmdlib._GetUpdatedIPolicy,
-                        old_policy, invalid_ispec, group_policy=isgroup)
+      self.assertRaises(errors.OpPrereqError, cmdlib._GetUpdatedIPolicy,
+                        old_policy, invalid_for_group, group_policy=isgroup)
+    good_ispecs = self._OLD_CLUSTER_POLICY[constants.ISPECS_MINMAX]
+    invalid_ispecs = copy.deepcopy(good_ispecs)
+    invalid_policy = {
+      constants.ISPECS_MINMAX: invalid_ispecs,
+      }
+    for minmax in invalid_ispecs:
+      for key in constants.ISPECS_MINMAX_KEYS:
+        ispec = minmax[key]
+        ispec[INVALID_KEY] = None
+        self.assertRaises(errors.TypeEnforcementError,
+                          cmdlib._GetUpdatedIPolicy, old_policy,
+                          invalid_policy, group_policy=isgroup)
+        del ispec[INVALID_KEY]
+        for par in constants.ISPECS_PARAMETERS:
+          oldv = ispec[par]
+          ispec[par] = "this_is_not_good"
+          self.assertRaises(errors.TypeEnforcementError,
+                            cmdlib._GetUpdatedIPolicy,
+                            old_policy, invalid_policy, group_policy=isgroup)
+          ispec[par] = oldv
+    # Ensure the policy has been fully restored (no modification left over)
+    cmdlib._GetUpdatedIPolicy(old_policy, invalid_policy, group_policy=isgroup)
 
   def testInvalidKeys(self):
     self._TestInvalidKeys(self._OLD_GROUP_POLICY, True)
     self._TestInvalidKeys(self._OLD_CLUSTER_POLICY, False)
 
+  def testInvalidValues(self):
+    for par in (constants.IPOLICY_PARAMETERS |
+                frozenset([constants.IPOLICY_DTS])):
+      bad_policy = {
+        par: "invalid_value",
+        }
+      self.assertRaises(errors.OpPrereqError, cmdlib._GetUpdatedIPolicy, {},
+                        bad_policy, group_policy=True)
 
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
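The behaviour exercised by testWithIPolicy above, in a rough standalone sketch (hypothetical helper names; the real check is cmdlib._ComputeIPolicySpecViolation, which also covers disk templates): a value only violates the policy when no min/max pair accepts it.

from ganeti import constants

def _FitsPair(minmax, par, value):
  """True when value lies within this min/max pair for parameter par."""
  return (minmax[constants.ISPECS_MIN][par] <= value
          <= minmax[constants.ISPECS_MAX][par])

def _ParViolations(ipolicy, par, value):
  """Report a violation only if every min/max pair rejects the value."""
  if any(_FitsPair(mm, par, value)
         for mm in ipolicy[constants.ISPECS_MINMAX]):
    return []
  return ["%s value %s is out of range" % (par, value)]
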
index 71d60e6..cbfa510 100755 (executable)
@@ -436,11 +436,9 @@ class TestConfigRunner(unittest.TestCase):
     # depending on the owner (cluster or group)
     if isgroup:
       errs = cfg.VerifyConfig()
-      # FIXME: A bug in FillIPolicy (issue 401) makes this test fail, so we
-      # invert the assertions for the time being
-      self.assertFalse(len(errs) >= 1)
+      self.assertTrue(len(errs) >= 1)
       errstr = "%s has invalid instance policy" % ipowner
-      self.assertFalse(_IsErrorInList(errstr, errs))
+      self.assertTrue(_IsErrorInList(errstr, errs))
     else:
       self.assertRaises(AssertionError, cfg.VerifyConfig)
     del ipolicy[INVALID_KEY]
@@ -461,13 +459,18 @@ class TestConfigRunner(unittest.TestCase):
     else:
       del ipolicy[key]
 
-    ispeclist = [
-      (ipolicy[constants.ISPECS_MINMAX][constants.ISPECS_MIN],
-       "%s/%s" % (constants.ISPECS_MINMAX, constants.ISPECS_MIN)),
-      (ipolicy[constants.ISPECS_MINMAX][constants.ISPECS_MAX],
-       "%s/%s" % (constants.ISPECS_MINMAX, constants.ISPECS_MAX)),
-      (ipolicy[constants.ISPECS_STD], constants.ISPECS_STD),
-      ]
+    ispeclist = []
+    if constants.ISPECS_MINMAX in ipolicy:
+      for k in range(len(ipolicy[constants.ISPECS_MINMAX])):
+        ispeclist.extend([
+            (ipolicy[constants.ISPECS_MINMAX][k][constants.ISPECS_MIN],
+             "%s[%s]/%s" % (constants.ISPECS_MINMAX, k, constants.ISPECS_MIN)),
+            (ipolicy[constants.ISPECS_MINMAX][k][constants.ISPECS_MAX],
+             "%s[%s]/%s" % (constants.ISPECS_MINMAX, k, constants.ISPECS_MAX)),
+            ])
+    if constants.ISPECS_STD in ipolicy:
+      ispeclist.append((ipolicy[constants.ISPECS_STD], constants.ISPECS_STD))
+
     for (ispec, ispecpath) in ispeclist:
       ispec[INVALID_KEY] = None
       errs = cfg.VerifyConfig()
@@ -494,6 +497,29 @@ class TestConfigRunner(unittest.TestCase):
         errs = cfg.VerifyConfig()
         self.assertFalse(errs)
 
+    if constants.ISPECS_MINMAX in ipolicy:
+      # Test partial minmax specs
+      for minmax in ipolicy[constants.ISPECS_MINMAX]:
+        for key in constants.ISPECS_MINMAX_KEYS:
+          self.assertTrue(key in minmax)
+          ispec = minmax[key]
+          del minmax[key]
+          errs = cfg.VerifyConfig()
+          self.assertTrue(len(errs) >= 1)
+          self.assertTrue(_IsErrorInList("Missing instance specification",
+                                         errs))
+          minmax[key] = ispec
+          for par in constants.ISPECS_PARAMETERS:
+            oldv = ispec[par]
+            del ispec[par]
+            errs = cfg.VerifyConfig()
+            self.assertTrue(len(errs) >= 1)
+            self.assertTrue(_IsErrorInList("Missing instance specs parameters",
+                                           errs))
+            ispec[par] = oldv
+      errs = cfg.VerifyConfig()
+      self.assertFalse(errs)
+
   def _TestVerifyConfigGroupIPolicy(self, groupinfo, cfg):
     old_ipolicy = groupinfo.ipolicy
     ipolicy = cfg.GetClusterInfo().SimpleFillIPolicy({})
@@ -506,16 +532,6 @@ class TestConfigRunner(unittest.TestCase):
       errs = cfg.VerifyConfig()
       self.assertFalse(errs)
       ipolicy[key] = oldv
-    # Test partial minmax specs
-    minmax = ipolicy[constants.ISPECS_MINMAX]
-    for ispec_key in minmax.keys():
-      ispec = minmax[ispec_key]
-      for par in constants.ISPECS_PARAMETERS:
-        oldv = ispec[par]
-        del ispec[par]
-        errs = cfg.VerifyConfig()
-        self.assertFalse(errs)
-        ispec[par] = oldv
     groupinfo.ipolicy = old_ipolicy
 
   def _TestVerifyConfigClusterIPolicy(self, ipolicy, cfg):
@@ -526,14 +542,18 @@ class TestConfigRunner(unittest.TestCase):
       del ipolicy[key]
       self.assertRaises(AssertionError, cfg.VerifyConfig)
       ipolicy[key] = oldv
-    # Test partial minmax specs
-    minmax = ipolicy[constants.ISPECS_MINMAX]
-    for key in constants.ISPECS_MINMAX_KEYS:
-      self.assertTrue(key in minmax)
-      oldv = minmax[key]
-      del minmax[key]
-      self.assertRaises(AssertionError, cfg.VerifyConfig)
-      minmax[key] = oldv
+    errs = cfg.VerifyConfig()
+    self.assertFalse(errs)
+    # Partial standard specs
+    ispec = ipolicy[constants.ISPECS_STD]
+    for par in constants.ISPECS_PARAMETERS:
+      oldv = ispec[par]
+      del ispec[par]
+      errs = cfg.VerifyConfig()
+      self.assertTrue(len(errs) >= 1)
+      self.assertTrue(_IsErrorInList("Missing instance specs parameters",
+                                     errs))
+      ispec[par] = oldv
     errs = cfg.VerifyConfig()
     self.assertFalse(errs)
 
index e20fc11..30e00d8 100755 (executable)
@@ -30,7 +30,7 @@ import os.path
 
 from ganeti import errors
 from ganeti import opcodes
-from ganeti import mcpu
+from ganeti import hooksmaster
 from ganeti import backend
 from ganeti import constants
 from ganeti import cmdlib
@@ -250,14 +250,14 @@ class TestHooksMaster(unittest.TestCase):
 
   def testTotalFalse(self):
     """Test complete rpc failure"""
-    hm = mcpu.HooksMaster.BuildFromLu(self._call_false, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._call_false, self.lu)
     self.failUnlessRaises(errors.HooksFailure,
                           hm.RunPhase, constants.HOOKS_PHASE_PRE)
     hm.RunPhase(constants.HOOKS_PHASE_POST)
 
   def testIndividualFalse(self):
     """Test individual node failure"""
-    hm = mcpu.HooksMaster.BuildFromLu(self._call_nodes_false, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._call_nodes_false, self.lu)
     hm.RunPhase(constants.HOOKS_PHASE_PRE)
     #self.failUnlessRaises(errors.HooksFailure,
     #                      hm.RunPhase, constants.HOOKS_PHASE_PRE)
@@ -265,14 +265,14 @@ class TestHooksMaster(unittest.TestCase):
 
   def testScriptFalse(self):
     """Test individual rpc failure"""
-    hm = mcpu.HooksMaster.BuildFromLu(self._call_script_fail, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._call_script_fail, self.lu)
     self.failUnlessRaises(errors.HooksAbort,
                           hm.RunPhase, constants.HOOKS_PHASE_PRE)
     hm.RunPhase(constants.HOOKS_PHASE_POST)
 
   def testScriptSucceed(self):
     """Test individual rpc failure"""
-    hm = mcpu.HooksMaster.BuildFromLu(FakeHooksRpcSuccess, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(FakeHooksRpcSuccess, self.lu)
     for phase in (constants.HOOKS_PHASE_PRE, constants.HOOKS_PHASE_POST):
       hm.RunPhase(phase)
 
@@ -323,7 +323,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
   def testEmptyEnv(self):
     # Check pre-phase hook
     self.lu.hook_env = {}
-    hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
     hm.RunPhase(constants.HOOKS_PHASE_PRE)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
@@ -349,7 +349,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     self.lu.hook_env = {
       "FOO": "pre-foo-value",
       }
-    hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
     hm.RunPhase(constants.HOOKS_PHASE_PRE)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
@@ -396,7 +396,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
       self.lu.hook_env = { name: "value" }
 
       # Test using a clean HooksMaster instance
-      hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+      hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
 
       for phase in [constants.HOOKS_PHASE_PRE, constants.HOOKS_PHASE_POST]:
         self.assertRaises(AssertionError, hm.RunPhase, phase)
@@ -404,7 +404,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
 
   def testNoNodes(self):
     self.lu.hook_env = {}
-    hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
     hm.RunPhase(constants.HOOKS_PHASE_PRE, nodes=[])
     self.assertRaises(IndexError, self._rpcs.pop)
 
@@ -416,7 +416,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
       "node93782.example.net",
       ]
 
-    hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
 
     for phase in [constants.HOOKS_PHASE_PRE, constants.HOOKS_PHASE_POST]:
       hm.RunPhase(phase, nodes=nodes)
@@ -434,7 +434,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
       "FOO": "value",
       }
 
-    hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
     hm.RunConfigUpdate()
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
@@ -453,7 +453,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
       "FOO": "value",
       }
 
-    hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
     hm.RunPhase(constants.HOOKS_PHASE_POST)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
@@ -471,7 +471,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     self.assertRaises(AssertionError, self.lu.BuildHooksEnv)
     self.assertRaises(AssertionError, self.lu.BuildHooksNodes)
 
-    hm = mcpu.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
     self.assertEqual(hm.pre_env, {})
     self.assertRaises(IndexError, self._rpcs.pop)
 
index 720b22a..719a4a1 100755 (executable)
@@ -22,6 +22,7 @@
 """Script for unittesting the objects module"""
 
 
+import copy
 import unittest
 
 from ganeti import constants
@@ -414,11 +415,12 @@ class TestInstancePolicy(unittest.TestCase):
 
   def _AssertIPolicyIsFull(self, policy):
     self.assertEqual(frozenset(policy.keys()), constants.IPOLICY_ALL_KEYS)
-    minmax = policy[constants.ISPECS_MINMAX]
-    self.assertEqual(frozenset(minmax.keys()), constants.ISPECS_MINMAX_KEYS)
-    for key in constants.ISPECS_MINMAX_KEYS:
-      self.assertEqual(frozenset(minmax[key].keys()),
-                       constants.ISPECS_PARAMETERS)
+    self.assertTrue(len(policy[constants.ISPECS_MINMAX]) > 0)
+    for minmax in policy[constants.ISPECS_MINMAX]:
+      self.assertEqual(frozenset(minmax.keys()), constants.ISPECS_MINMAX_KEYS)
+      for key in constants.ISPECS_MINMAX_KEYS:
+        self.assertEqual(frozenset(minmax[key].keys()),
+                         constants.ISPECS_PARAMETERS)
     self.assertEqual(frozenset(policy[constants.ISPECS_STD].keys()),
                      constants.ISPECS_PARAMETERS)
 
@@ -427,31 +429,149 @@ class TestInstancePolicy(unittest.TestCase):
                                                 True)
     self._AssertIPolicyIsFull(constants.IPOLICY_DEFAULTS)
 
+  def _AssertPolicyIsBad(self, ipolicy, do_check_std=None):
+    if do_check_std is None:
+      check_std_vals = [False, True]
+    else:
+      check_std_vals = [do_check_std]
+    for check_std in check_std_vals:
+      self.assertRaises(errors.ConfigurationError,
+                        objects.InstancePolicy.CheckISpecSyntax,
+                        ipolicy, check_std)
+
   def testCheckISpecSyntax(self):
+    default_stdspec = constants.IPOLICY_DEFAULTS[constants.ISPECS_STD]
+    incomplete_ipolicies = [
+      {
+         constants.ISPECS_MINMAX: [],
+         constants.ISPECS_STD: default_stdspec,
+         },
+      {
+         constants.ISPECS_MINMAX: [{}],
+         constants.ISPECS_STD: default_stdspec,
+         },
+      {
+        constants.ISPECS_MINMAX: [{
+          constants.ISPECS_MIN: NotImplemented,
+          }],
+        constants.ISPECS_STD: default_stdspec,
+        },
+      {
+        constants.ISPECS_MINMAX: [{
+          constants.ISPECS_MAX: NotImplemented,
+          }],
+        constants.ISPECS_STD: default_stdspec,
+        },
+      {
+        constants.ISPECS_MINMAX: [{
+          constants.ISPECS_MIN: NotImplemented,
+          constants.ISPECS_MAX: NotImplemented,
+          }],
+        },
+      ]
+    for ipol in incomplete_ipolicies:
+      self.assertRaises(errors.ConfigurationError,
+                        objects.InstancePolicy.CheckISpecSyntax,
+                        ipol, True)
+      oldminmax = ipol[constants.ISPECS_MINMAX]
+      if oldminmax:
+        # Prepending valid specs shouldn't change the error
+        ipol[constants.ISPECS_MINMAX] = ([constants.ISPECS_MINMAX_DEFAULTS] +
+                                         oldminmax)
+        self.assertRaises(errors.ConfigurationError,
+                          objects.InstancePolicy.CheckISpecSyntax,
+                          ipol, True)
+
+    good_ipolicy = {
+      constants.ISPECS_MINMAX: [
+        {
+          constants.ISPECS_MIN: {
+            constants.ISPEC_MEM_SIZE: 64,
+            constants.ISPEC_CPU_COUNT: 1,
+            constants.ISPEC_DISK_COUNT: 2,
+            constants.ISPEC_DISK_SIZE: 64,
+            constants.ISPEC_NIC_COUNT: 1,
+            constants.ISPEC_SPINDLE_USE: 1,
+            },
+          constants.ISPECS_MAX: {
+            constants.ISPEC_MEM_SIZE: 16384,
+            constants.ISPEC_CPU_COUNT: 5,
+            constants.ISPEC_DISK_COUNT: 12,
+            constants.ISPEC_DISK_SIZE: 1024,
+            constants.ISPEC_NIC_COUNT: 9,
+            constants.ISPEC_SPINDLE_USE: 18,
+            },
+          },
+        {
+          constants.ISPECS_MIN: {
+            constants.ISPEC_MEM_SIZE: 32768,
+            constants.ISPEC_CPU_COUNT: 8,
+            constants.ISPEC_DISK_COUNT: 1,
+            constants.ISPEC_DISK_SIZE: 1024,
+            constants.ISPEC_NIC_COUNT: 1,
+            constants.ISPEC_SPINDLE_USE: 1,
+            },
+          constants.ISPECS_MAX: {
+            constants.ISPEC_MEM_SIZE: 65536,
+            constants.ISPEC_CPU_COUNT: 10,
+            constants.ISPEC_DISK_COUNT: 5,
+            constants.ISPEC_DISK_SIZE: 1024 * 1024,
+            constants.ISPEC_NIC_COUNT: 3,
+            constants.ISPEC_SPINDLE_USE: 12,
+            },
+          },
+        ],
+      }
+    good_ipolicy[constants.ISPECS_STD] = copy.deepcopy(
+      good_ipolicy[constants.ISPECS_MINMAX][0][constants.ISPECS_MAX])
+    # Check that it's really good before making it bad
+    objects.InstancePolicy.CheckISpecSyntax(good_ipolicy, True)
+
+    bad_ipolicy = copy.deepcopy(good_ipolicy)
+    for minmax in bad_ipolicy[constants.ISPECS_MINMAX]:
+      for (key, spec) in minmax.items():
+        for param in spec:
+          oldv = spec[param]
+          del spec[param]
+          self._AssertPolicyIsBad(bad_ipolicy)
+          if key == constants.ISPECS_MIN:
+            spec[param] = minmax[constants.ISPECS_MAX][param] + 1
+          self._AssertPolicyIsBad(bad_ipolicy)
+          spec[param] = oldv
+    assert bad_ipolicy == good_ipolicy
+
+    stdspec = bad_ipolicy[constants.ISPECS_STD]
+    for param in stdspec:
+      oldv = stdspec[param]
+      del stdspec[param]
+      self._AssertPolicyIsBad(bad_ipolicy, True)
+      # The std spec was copied from a max spec, so oldv + 1 is out of range
+      stdspec[param] = oldv + 1
+      self._AssertPolicyIsBad(bad_ipolicy, True)
+      stdspec[param] = oldv
+    assert bad_ipolicy == good_ipolicy
+
+    for minmax in good_ipolicy[constants.ISPECS_MINMAX]:
+      for spec in minmax.values():
+        good_ipolicy[constants.ISPECS_STD] = spec
+        objects.InstancePolicy.CheckISpecSyntax(good_ipolicy, True)
+
+  def testCheckISpecParamSyntax(self):
     par = "my_parameter"
     for check_std in [True, False]:
-      # Only one policy limit
-      for key in constants.ISPECS_MINMAX_KEYS:
-        minmax = dict((k, {}) for k in constants.ISPECS_MINMAX_KEYS)
-        minmax[key][par] = 11
-        objects.InstancePolicy.CheckISpecSyntax(minmax, {}, par, check_std)
-      if check_std:
-        minmax = dict((k, {}) for k in constants.ISPECS_MINMAX_KEYS)
-        stdspec = {par: 11}
-        objects.InstancePolicy.CheckISpecSyntax(minmax, stdspec, par, check_std)
-
       # Min and max only
       good_values = [(11, 11), (11, 40), (0, 0)]
       for (mn, mx) in good_values:
         minmax = dict((k, {}) for k in constants.ISPECS_MINMAX_KEYS)
         minmax[constants.ISPECS_MIN][par] = mn
         minmax[constants.ISPECS_MAX][par] = mx
-        objects.InstancePolicy.CheckISpecSyntax(minmax, {}, par, check_std)
+        objects.InstancePolicy._CheckISpecParamSyntax(minmax, {}, par,
+                                                     check_std)
       minmax = dict((k, {}) for k in constants.ISPECS_MINMAX_KEYS)
       minmax[constants.ISPECS_MIN][par] = 11
       minmax[constants.ISPECS_MAX][par] = 5
       self.assertRaises(errors.ConfigurationError,
-                        objects.InstancePolicy.CheckISpecSyntax,
+                        objects.InstancePolicy._CheckISpecParamSyntax,
                         minmax, {}, par, check_std)
     # Min, std, max
     good_values = [
@@ -465,24 +585,29 @@ class TestInstancePolicy(unittest.TestCase):
         constants.ISPECS_MAX: {par: mx},
         }
       stdspec = {par: st}
-      objects.InstancePolicy.CheckISpecSyntax(minmax, stdspec, par, True)
+      objects.InstancePolicy._CheckISpecParamSyntax(minmax, stdspec, par, True)
     bad_values = [
-      (11, 11,  5),
-      (40, 11, 11),
-      (11, 80, 40),
-      (11,  5, 40),
-      (11,  5,  5),
-      (40, 40, 11),
+      (11, 11,  5, True),
+      (40, 11, 11, True),
+      (11, 80, 40, False),
+      (11,  5, 40, False),
+      (11,  5,  5, True),
+      (40, 40, 11, True),
       ]
-    for (mn, st, mx) in bad_values:
+    for (mn, st, mx, excp) in bad_values:
       minmax = {
         constants.ISPECS_MIN: {par: mn},
         constants.ISPECS_MAX: {par: mx},
         }
       stdspec = {par: st}
-      self.assertRaises(errors.ConfigurationError,
-                        objects.InstancePolicy.CheckISpecSyntax,
-                        minmax, stdspec, par, True)
+      if excp:
+        self.assertRaises(errors.ConfigurationError,
+                          objects.InstancePolicy._CheckISpecParamSyntax,
+                          minmax, stdspec, par, True)
+      else:
+        ret = objects.InstancePolicy._CheckISpecParamSyntax(minmax, stdspec,
+                                                            par, True)
+        self.assertFalse(ret)
 
   def testCheckDiskTemplates(self):
     invalid = "this_is_not_a_good_template"
@@ -531,12 +656,7 @@ class TestInstancePolicy(unittest.TestCase):
   def _AssertIPolicyMerged(self, default_pol, diff_pol, merged_pol):
     for (key, value) in merged_pol.items():
       if key in diff_pol:
-        if key == constants.ISPECS_MINMAX:
-          self.assertEqual(frozenset(value), constants.ISPECS_MINMAX_KEYS)
-          for k in constants.ISPECS_MINMAX_KEYS:
-            self._AssertISpecsMerged(default_pol[key][k], diff_pol[key][k],
-                                     value[k])
-        elif key == constants.ISPECS_STD:
+        if key == constants.ISPECS_STD:
           self._AssertISpecsMerged(default_pol[key], diff_pol[key], value)
         else:
           self.assertEqual(value, diff_pol[key])
@@ -548,6 +668,9 @@ class TestInstancePolicy(unittest.TestCase):
       {constants.IPOLICY_VCPU_RATIO: 3.14},
       {constants.IPOLICY_SPINDLE_RATIO: 2.72},
       {constants.IPOLICY_DTS: [constants.DT_FILE]},
+      {constants.ISPECS_STD: {constants.ISPEC_DISK_COUNT: 3}},
+      {constants.ISPECS_MINMAX: [constants.ISPECS_MINMAX_DEFAULTS,
+                                 constants.ISPECS_MINMAX_DEFAULTS]}
       ]
     for diff_pol in partial_policies:
       policy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, diff_pol)
@@ -555,34 +678,6 @@ class TestInstancePolicy(unittest.TestCase):
       self._AssertIPolicyIsFull(policy)
       self._AssertIPolicyMerged(constants.IPOLICY_DEFAULTS, diff_pol, policy)
 
-  def testFillIPolicySpecs(self):
-    partial_ipolicies = [
-      {
-        constants.ISPECS_MINMAX: {
-          constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 32},
-          constants.ISPECS_MAX: {constants.ISPEC_CPU_COUNT: 1024}
-          },
-        },
-      {
-        constants.ISPECS_MINMAX: {
-          constants.ISPECS_MAX: {
-            constants.ISPEC_DISK_COUNT: constants.MAX_DISKS - 1,
-            constants.ISPEC_NIC_COUNT: constants.MAX_NICS - 1,
-            },
-          constants.ISPECS_MIN: {},
-          },
-          constants.ISPECS_STD: {constants.ISPEC_DISK_SIZE: 2048},
-        },
-      {
-        constants.ISPECS_STD: {constants.ISPEC_SPINDLE_USE: 3},
-        },
-      ]
-    for diff_pol in partial_ipolicies:
-      policy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, diff_pol)
-      objects.InstancePolicy.CheckParameterSyntax(policy, True)
-      self._AssertIPolicyIsFull(policy)
-      self._AssertIPolicyMerged(constants.IPOLICY_DEFAULTS, diff_pol, policy)
-
 
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
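A minimal sketch of what the updated tests expect from objects.FillIPolicy (this combines two of the partial policies used above): minmax pairs override the defaults as a whole, while only the std spec is still merged parameter by parameter.

from ganeti import constants, objects

partial = {
  constants.ISPECS_STD: {constants.ISPEC_DISK_COUNT: 3},
  constants.ISPECS_MINMAX: [constants.ISPECS_MINMAX_DEFAULTS,
                            constants.ISPECS_MINMAX_DEFAULTS],
  }
full = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, partial)
# full[constants.ISPECS_MINMAX] is taken verbatim from partial, while
# full[constants.ISPECS_STD] is the default std spec with disk-count set to 3.
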
index 3174c69..555ee21 100755 (executable)
@@ -619,18 +619,40 @@ class GanetiRapiClientTests(testutils.GanetiTestCase):
   def testShutdownInstance(self):
     self.rapi.AddResponse("1487")
     self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",
+                                                        dry_run=True,
+                                                        reason="NoMore"))
+    self.assertHandler(rlib2.R_2_instances_name_shutdown)
+    self.assertItems(["foo-instance"])
+    self.assertDryRun()
+    self.assertQuery("reason", ["NoMore"])
+
+  def testShutdownInstanceDefaultReason(self):
+    self.rapi.AddResponse("1487")
+    self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",
                                                         dry_run=True))
     self.assertHandler(rlib2.R_2_instances_name_shutdown)
     self.assertItems(["foo-instance"])
     self.assertDryRun()
+    self.assertQuery("reason", None)
 
   def testStartupInstance(self):
     self.rapi.AddResponse("27149")
     self.assertEqual(27149, self.client.StartupInstance("bar-instance",
+                                                        dry_run=True,
+                                                        reason="New"))
+    self.assertHandler(rlib2.R_2_instances_name_startup)
+    self.assertItems(["bar-instance"])
+    self.assertDryRun()
+    self.assertQuery("reason", ["New"])
+
+  def testStartupInstanceDefaultReason(self):
+    self.rapi.AddResponse("27149")
+    self.assertEqual(27149, self.client.StartupInstance("bar-instance",
                                                         dry_run=True))
     self.assertHandler(rlib2.R_2_instances_name_startup)
     self.assertItems(["bar-instance"])
     self.assertDryRun()
+    self.assertQuery("reason", None)
 
   def testReinstallInstance(self):
     self.rapi.AddResponse(serializer.DumpJson([]))
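A minimal usage sketch of the new reason argument (the host name is a placeholder):

from ganeti.rapi import client as rapi_client

cl = rapi_client.GanetiRapiClient("cluster.example.com")
job_id = cl.StartupInstance("bar-instance", reason="New")
cl.ShutdownInstance("foo-instance", reason="NoMore")
# Omitting reason sends no "reason" query argument at all, as checked by the
# *DefaultReason tests above.
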
index e19c925..cb19811 100755 (executable)
@@ -400,6 +400,7 @@ class TestInstanceStartup(unittest.TestCase):
     handler = _CreateHandler(rlib2.R_2_instances_name_startup, ["inst31083"], {
       "force": ["1"],
       "no_remember": ["1"],
+      "reason": ["Newly created instance"],
       }, {}, clfactory)
     job_id = handler.PUT()
 
@@ -413,6 +414,12 @@ class TestInstanceStartup(unittest.TestCase):
     self.assertTrue(op.no_remember)
     self.assertTrue(op.force)
     self.assertFalse(op.dry_run)
+    self.assertEqual(op.reason[0][0], constants.OPCODE_REASON_SRC_USER)
+    self.assertEqual(op.reason[0][1], "Newly created instance")
+    self.assertEqual(op.reason[1][0],
+                     "%s:%s" % (constants.OPCODE_REASON_SRC_RLIB2,
+                                "instances_name_startup"))
+    self.assertEqual(op.reason[1][1], "")
 
     self.assertRaises(IndexError, cl.GetNextSubmittedJob)
 
@@ -422,6 +429,7 @@ class TestInstanceShutdown(unittest.TestCase):
     clfactory = _FakeClientFactory(_FakeClient)
     handler = _CreateHandler(rlib2.R_2_instances_name_shutdown, ["inst26791"], {
       "no_remember": ["0"],
+      "reason": ["Not used anymore"],
       }, {}, clfactory)
     job_id = handler.PUT()
 
@@ -434,6 +442,12 @@ class TestInstanceShutdown(unittest.TestCase):
     self.assertEqual(op.instance_name, "inst26791")
     self.assertFalse(op.no_remember)
     self.assertFalse(op.dry_run)
+    self.assertEqual(op.reason[0][0], constants.OPCODE_REASON_SRC_USER)
+    self.assertEqual(op.reason[0][1], "Not used anymore")
+    self.assertEqual(op.reason[1][0],
+                     "%s:%s" % (constants.OPCODE_REASON_SRC_RLIB2,
+                                "instances_name_shutdown"))
+    self.assertEqual(op.reason[1][1], "")
 
     self.assertRaises(IndexError, cl.GetNextSubmittedJob)
 
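As asserted above, the reason trail attached to the opcode starts with the user-supplied reason, followed by an entry added by the RAPI handler. A sketch of the first two fields of each entry (entries may carry further fields, e.g. a timestamp, which these tests do not compare):

from ganeti import constants

expected_head = [
  (constants.OPCODE_REASON_SRC_USER, "Newly created instance"),
  ("%s:%s" % (constants.OPCODE_REASON_SRC_RLIB2, "instances_name_startup"), ""),
  ]
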
index eefabec..0526b1a 100755 (executable)
@@ -102,17 +102,28 @@ def CheckHostname(path):
   return False
 
 
-def UpgradeIPolicy(ipolicy):
+def _FillIPolicySpecs(default_ipolicy, ipolicy):
+  if "minmax" in ipolicy:
+    for (key, spec) in ipolicy["minmax"][0].items():
+      for (par, val) in default_ipolicy["minmax"][0][key].items():
+        if par not in spec:
+          spec[par] = val
+
+
+def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
   minmax_keys = ["min", "max"]
   if any((k in ipolicy) for k in minmax_keys):
     minmax = {}
-    ipolicy["minmax"] = minmax
     for key in minmax_keys:
       if key in ipolicy:
-        minmax[key] = ipolicy[key]
+        if ipolicy[key]:
+          minmax[key] = ipolicy[key]
         del ipolicy[key]
-      else:
-        minmax[key] = {}
+    if minmax:
+      ipolicy["minmax"] = [minmax]
+  if isgroup and "std" in ipolicy:
+    del ipolicy["std"]
+  _FillIPolicySpecs(default_ipolicy, ipolicy)
 
 
 def UpgradeNetworks(config_data):
@@ -125,19 +136,23 @@ def UpgradeCluster(config_data):
   cluster = config_data.get("cluster", None)
   if cluster is None:
     raise Error("Cannot find cluster")
-  ipolicy = cluster.get("ipolicy", None)
+  ipolicy = cluster.setdefault("ipolicy", None)
   if ipolicy:
-    UpgradeIPolicy(ipolicy)
+    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
 
 
 def UpgradeGroups(config_data):
+  cl_ipolicy = config_data["cluster"].get("ipolicy")
   for group in config_data["nodegroups"].values():
     networks = group.get("networks", None)
     if not networks:
       group["networks"] = {}
     ipolicy = group.get("ipolicy", None)
     if ipolicy:
-      UpgradeIPolicy(ipolicy)
+      if cl_ipolicy is None:
+        raise Error("A group defines an instance policy but there is no"
+                    " instance policy at cluster level")
+      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
 
 
 def UpgradeInstances(config_data):
@@ -245,44 +260,53 @@ def UpgradeAll(config_data):
   UpgradeInstances(config_data)
 
 
-def DowngradeIPolicy(ipolicy):
+def DowngradeIPolicy(ipolicy, owner):
   # Downgrade IPolicy to 2.7 (stable)
   minmax_keys = ["min", "max"]
   specs_is_split = any((k in ipolicy) for k in minmax_keys)
   if not specs_is_split:
     if "minmax" in ipolicy:
-      minmax = ipolicy["minmax"]
+      if type(ipolicy["minmax"]) is not list:
+        raise Error("Invalid minmax type in %s ipolicy: %s" %
+                    (owner, type(ipolicy["minmax"])))
+      if len(ipolicy["minmax"]) > 1:
+        logging.warning("Discarding some limit specs values from %s policy",
+                        owner)
+      minmax = ipolicy["minmax"][0]
       del ipolicy["minmax"]
     else:
       minmax = {}
     for key in minmax_keys:
       spec = minmax.get(key, {})
       ipolicy[key] = spec
+    if "std" not in ipolicy:
+      ipolicy["std"] = {}
 
 
 def DowngradeGroups(config_data):
   for group in config_data["nodegroups"].values():
     ipolicy = group.get("ipolicy", None)
-    if ipolicy:
-      DowngradeIPolicy(ipolicy)
+    if ipolicy is not None:
+      DowngradeIPolicy(ipolicy, "group \"%s\"" % group.get("name"))
 
 
-def DowngradeStorageTypes(cluster):
-  # Remove storage types to downgrade to 2.7
-  if "enabled_storage_types" in cluster:
-    logging.warning("Removing cluster storage types; value = %s",
-                    utils.CommaJoin(cluster["enabled_storage_types"]))
-    del cluster["enabled_storage_types"]
+def DowngradeEnabledTemplates(cluster):
+  # Remove enabled disk templates to downgrade to 2.7
+  edt_key = "enabled_disk_templates"
+  if edt_key in cluster:
+    logging.warning("Removing cluster's enabled disk templates; value = %s",
+                    utils.CommaJoin(cluster[edt_key]))
+    del cluster[edt_key]
 
 
 def DowngradeCluster(config_data):
   cluster = config_data.get("cluster", None)
   if cluster is None:
     raise Error("Cannot find cluster")
-  DowngradeStorageTypes(cluster)
+  DowngradeEnabledTemplates(cluster)
   ipolicy = cluster.get("ipolicy", None)
   if ipolicy:
-    DowngradeIPolicy(ipolicy)
+    DowngradeIPolicy(ipolicy, "cluster")
 
 
 def DowngradeAll(config_data):
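A sketch of the two policy layouts these helpers translate between (data only, not the helpers themselves; the parameter values are invented):

from ganeti import constants

# 2.7 layout: flat "min"/"max"/"std" keys inside the policy
ipolicy_27 = {
  "min": {constants.ISPEC_MEM_SIZE: 128},
  "max": {constants.ISPEC_MEM_SIZE: 32768},
  "std": {},
  }
# 2.8 layout after UpgradeIPolicy: a "minmax" list of min/max pairs, with
# missing spec parameters filled in from the default (or cluster) policy;
# "std" is dropped for group policies.
ipolicy_28 = {
  "minmax": [{"min": {constants.ISPEC_MEM_SIZE: 128},
              "max": {constants.ISPEC_MEM_SIZE: 32768}}],
  "std": {},
  }
# DowngradeIPolicy keeps only the first minmax pair (logging a warning if more
# are discarded), flattens it back into "min"/"max" and re-adds an empty "std".
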
index b86afc1..52d1355 100644 (file)
@@ -26,3 +26,5 @@ for u in @GNTMASTERUSER@ @GNTRAPIUSER@
 do
   adduser $u @GNTADMINGROUP@
 done
+
+adduser @GNTMASTERUSER@ @GNTCONFDGROUP@