Merge remote-tracking branch 'origin/devel-2.5'
author Iustin Pop <iustin@google.com>
Mon, 5 Mar 2012 21:48:16 +0000 (23:48 +0200)
committer Iustin Pop <iustin@google.com>
Tue, 6 Mar 2012 18:58:14 +0000 (20:58 +0200)
Conflicts:
        Makefile.am    (trivial, test data files added on both branches)
        lib/opcodes.py (trivial, master renamed filter→qfilter, 2.5
                        fixed the type of the parameter)

Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: René Nussbaumer <rn@google.com>

12 files changed:
Makefile.am
lib/client/gnt_node.py
lib/cmdlib.py
lib/opcodes.py
lib/storage.py
man/gnt-node.rst
test/data/vgreduce-removemissing-2.02.02.txt [new file with mode: 0644]
test/data/vgreduce-removemissing-2.02.66-fail.txt [new file with mode: 0644]
test/data/vgreduce-removemissing-2.02.66-ok.txt [new file with mode: 0644]
test/data/vgs-missing-pvs-2.02.02.txt [new file with mode: 0644]
test/data/vgs-missing-pvs-2.02.66.txt [new file with mode: 0644]
test/ganeti.storage_unittest.py [new file with mode: 0755]

diff --git a/Makefile.am b/Makefile.am
index bd3be18..9ee0eea 100644
@@ -48,6 +48,7 @@ toolsdir = $(pkglibdir)/tools
 iallocatorsdir = $(pkglibdir)/iallocators
 pytoolsdir = $(pkgpythondir)/tools
 docdir = $(datadir)/doc/$(PACKAGE)
+myexeclibdir = $(pkglibdir)
 
 # Delete output file if an error occurred while building it
 .DELETE_ON_ERROR:
@@ -573,12 +574,12 @@ pkglib_python_scripts = \
 nodist_pkglib_python_scripts = \
        tools/ensure-dirs
 
-pkglib_SCRIPTS = \
+myexeclib_SCRIPTS = \
        daemons/daemon-util \
        tools/kvm-ifup \
        $(pkglib_python_scripts)
 
-nodist_pkglib_SCRIPTS = \
+nodist_myexeclib_SCRIPTS = \
        $(nodist_pkglib_python_scripts)
 
 EXTRA_DIST = \
@@ -714,6 +715,11 @@ TEST_FILES = \
        test/data/ovfdata/wrong_ova.ova \
        test/data/ovfdata/wrong_xml.ovf \
        test/data/ovfdata/other/rawdisk.raw \
+       test/data/vgreduce-removemissing-2.02.02.txt \
+       test/data/vgreduce-removemissing-2.02.66-fail.txt \
+       test/data/vgreduce-removemissing-2.02.66-ok.txt \
+       test/data/vgs-missing-pvs-2.02.02.txt \
+       test/data/vgs-missing-pvs-2.02.66.txt \
        test/import-export_unittest-helper
 
 python_tests = \
@@ -760,6 +766,7 @@ python_tests = \
        test/ganeti.runtime_unittest.py \
        test/ganeti.serializer_unittest.py \
        test/ganeti.ssh_unittest.py \
+       test/ganeti.storage_unittest.py \
        test/ganeti.tools.ensure_dirs_unittest.py \
        test/ganeti.uidpool_unittest.py \
        test/ganeti.utils.algo_unittest.py \
diff --git a/lib/client/gnt_node.py b/lib/client/gnt_node.py
index 1be466b..e831221 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -890,8 +890,7 @@ commands = {
     [FORCE_OPT, IALLOCATOR_OPT, NEW_SECONDARY_OPT, EARLY_RELEASE_OPT,
      PRIORITY_OPT, PRIMARY_ONLY_OPT, SECONDARY_ONLY_OPT],
     "[-f] {-I <iallocator> | -n <dst>} <node>",
-    "Relocate the secondary instances from a node"
-    " to other nodes"),
+    "Relocate the primary and/or secondary instances from a node"),
   "failover": (
     FailoverNode, ARGS_ONE_NODE, [FORCE_OPT, IGNORE_CONSIST_OPT,
                                   IALLOCATOR_OPT, PRIORITY_OPT],
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index e11645e..2f06b3b 100644
@@ -2031,7 +2031,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
     # Get instances in node group; this is unsafe and needs verification later
-    inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
+    inst_names = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: inst_names,
@@ -2065,7 +2066,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
 
     group_nodes = set(self.group_info.members)
-    group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
+    group_instances = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
 
     unlocked_nodes = \
         group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
@@ -2075,11 +2077,13 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     if unlocked_nodes:
       raise errors.OpPrereqError("Missing lock for nodes: %s" %
-                                 utils.CommaJoin(unlocked_nodes))
+                                 utils.CommaJoin(unlocked_nodes),
+                                 errors.ECODE_STATE)
 
     if unlocked_instances:
       raise errors.OpPrereqError("Missing lock for instances: %s" %
-                                 utils.CommaJoin(unlocked_instances))
+                                 utils.CommaJoin(unlocked_instances),
+                                 errors.ECODE_STATE)
 
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
@@ -2099,17 +2103,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     for inst in self.my_inst_info.values():
       if inst.disk_template in constants.DTS_INT_MIRROR:
-        group = self.my_node_info[inst.primary_node].group
-        for nname in inst.secondary_nodes:
-          if self.all_node_info[nname].group != group:
+        for nname in inst.all_nodes:
+          if self.all_node_info[nname].group != self.group_uuid:
             extra_lv_nodes.add(nname)
 
     unlocked_lv_nodes = \
         extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     if unlocked_lv_nodes:
-      raise errors.OpPrereqError("these nodes could be locked: %s" %
-                                 utils.CommaJoin(unlocked_lv_nodes))
+      raise errors.OpPrereqError("Missing node locks for LV check: %s" %
+                                 utils.CommaJoin(unlocked_lv_nodes),
+                                 errors.ECODE_STATE)
     self.extra_lv_nodes = list(extra_lv_nodes)
 
   def _VerifyNode(self, ninfo, nresult):
@@ -2405,7 +2409,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     for node, n_img in node_image.items():
-      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
+      if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
+          self.all_node_info[node].group != self.group_uuid):
         # skip non-healthy nodes
         continue
       for volume in n_img.volumes:
@@ -2432,11 +2437,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # WARNING: we currently take into account down instances as well
       # as up ones, considering that even if they're down someone
       # might want to start them even in the event of a node failure.
-      if n_img.offline:
-        # we're skipping offline nodes from the N+1 warning, since
-        # most likely we don't have good memory infromation from them;
-        # we already list instances living on such nodes, and that's
-        # enough warning
+      if n_img.offline or self.all_node_info[node].group != self.group_uuid:
+        # we're skipping nodes marked offline and nodes in other groups from
+        # the N+1 warning, since most likely we don't have good memory
+        # information from them; we already list instances living on such
+        # nodes, and that's enough warning
         continue
       #TODO(dynmem): also consider ballooning out other instances
       for prinode, instances in n_img.sbp.items():
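
All three cmdlib.py hunks above apply the same narrowing: group verification now collects only instances whose primary node is in the verified group, and skips nodes owned by other groups when checking orphaned volumes and N+1 memory. A minimal standalone sketch of that filter (invented data shapes, not the ganeti.cmdlib API):

# Minimal standalone sketch (invented data shapes, not ganeti.cmdlib) of the
# "skip unhealthy or foreign-group nodes" test added in the hunks above.
class NodeImage(object):
  def __init__(self, offline=False, rpc_fail=False, lvm_fail=False):
    self.offline = offline
    self.rpc_fail = rpc_fail
    self.lvm_fail = lvm_fail

class NodeInfo(object):
  def __init__(self, group):
    self.group = group

def nodes_to_check(node_image, all_node_info, group_uuid):
  """Yields only healthy nodes belonging to the group being verified."""
  for node, n_img in sorted(node_image.items()):
    if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
        all_node_info[node].group != group_uuid):
      continue  # unhealthy node, or a node owned by another group
    yield node

node_image = {"node1": NodeImage(), "node2": NodeImage(offline=True),
              "node3": NodeImage()}
all_node_info = {"node1": NodeInfo("g1"), "node2": NodeInfo("g1"),
                 "node3": NodeInfo("g2")}
assert list(nodes_to_check(node_image, all_node_info, "g1")) == ["node1"]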
diff --git a/lib/opcodes.py b/lib/opcodes.py
index be68715..828497f 100644
@@ -915,7 +915,7 @@ class OpQuery(OpCode):
     _PUseLocking,
     ("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
      "Requested fields"),
-    ("qfilter", None, ht.TOr(ht.TNone, ht.TListOf),
+    ("qfilter", None, ht.TOr(ht.TNone, ht.TList),
      "Query filter"),
     ]
   OP_RESULT = \
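
The conflict mentioned in the commit message resolves here. In ganeti.ht, TList is itself a predicate, while TListOf is a predicate factory: it takes an item check (as in the TListOf(ht.TNonEmptyString) line above) and returns the list check. Passing the unapplied factory as the parameter type therefore validated nothing. A simplified sketch of the distinction (not the actual ganeti.ht source):

# Simplified sketch (not the real ganeti.ht source) of why the "qfilter"
# parameter check had to be ht.TList rather than the bare ht.TListOf.
def TList(val):
  """A complete predicate: checks that a value is a list."""
  return isinstance(val, list)

def TListOf(my_type):
  """A predicate factory: returns a check for homogeneous lists."""
  return lambda val: TList(val) and all(my_type(v) for v in val)

# The bug: the unapplied factory, used as a check, returns a new function
# (always truthy) for any input, so no validation happened.
broken_check = TListOf
assert broken_check("not even a list")  # "passes", wrongly

# The fix: TList actually constrains the parameter's shape.
assert TList(["name", "=", "node1"])
assert not TList("not a list")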
diff --git a/lib/storage.py b/lib/storage.py
index d382be5..d77d80b 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2009, 2011 Google Inc.
+# Copyright (C) 2009, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -406,6 +406,7 @@ class LvmVgStorage(_LvmBase):
 
   """
   LIST_COMMAND = "vgs"
+  VGREDUCE_COMMAND = "vgreduce"
 
   # Make sure to update constants.VALID_STORAGE_FIELDS when changing field
   # definitions.
@@ -418,7 +419,7 @@ class LvmVgStorage(_LvmBase):
     (constants.SF_ALLOCATABLE, [], True),
     ]
 
-  def _RemoveMissing(self, name):
+  def _RemoveMissing(self, name, _runcmd_fn=utils.RunCmd):
     """Runs "vgreduce --removemissing" on a volume group.
 
     @type name: string
@@ -428,13 +429,23 @@ class LvmVgStorage(_LvmBase):
     # Ignoring vgreduce exit code. Older versions exit with an error even though
     # the VG is already consistent. This was fixed in later versions, but we
     # cannot depend on it.
-    result = utils.RunCmd(["vgreduce", "--removemissing", name])
+    result = _runcmd_fn([self.VGREDUCE_COMMAND, "--removemissing", name])
 
     # Keep output in case something went wrong
     vgreduce_output = result.output
 
-    result = utils.RunCmd(["vgs", "--noheadings", "--nosuffix", name])
-    if result.failed:
+    # work around newer LVM version
+    if ("Wrote out consistent volume group" not in vgreduce_output or
+        "vgreduce --removemissing --force" in vgreduce_output):
+      # we need to re-run with --force
+      result = _runcmd_fn([self.VGREDUCE_COMMAND, "--removemissing",
+                           "--force", name])
+      vgreduce_output += "\n" + result.output
+
+    result = _runcmd_fn([self.LIST_COMMAND, "--noheadings",
+                         "--nosuffix", name])
+    # we also need to check the output
+    if result.failed or "Couldn't find device with uuid" in result.output:
       raise errors.StorageError(("Volume group '%s' still not consistent,"
                                  " 'vgreduce' output: %r,"
                                  " 'vgs' output: %r") %
diff --git a/man/gnt-node.rst b/man/gnt-node.rst
index 0f0a223..b8cc2af 100644
@@ -127,7 +127,12 @@ potential recovery).
 Note that this command is equivalent to using per-instance commands for
 each affected instance individually:
 
-- ``--primary-only`` is equivalent to ``gnt-instance failover/migration``
+- ``--primary-only`` is equivalent to ``gnt-instance
+  failover/migration`` for non-DRBD instances, but for DRBD instances
+  it's different, and is usually a slow process (it will change the
+  primary to another node while keeping the secondary, thus requiring
+  data copies, whereas failover/migrate will only toggle the
+  primary/secondary roles, a fast process)
 - ``--secondary-only`` is equivalent to ``gnt-instance replace-disks``
   in the secondary node change mode (only valid for DRBD instances)
 - when neither of the above is done a combination of the two cases is run
diff --git a/test/data/vgreduce-removemissing-2.02.02.txt b/test/data/vgreduce-removemissing-2.02.02.txt
new file mode 100644
index 0000000..db29420
--- /dev/null
@@ -0,0 +1,7 @@
+  Couldn't find device with uuid 'gg4cmC-4lrT-EN1v-39OA-6S2b-6eEI-wWlJJJ'.
+  Couldn't find all physical volumes for volume group xenvg.
+  Couldn't find device with uuid 'gg4cmC-4lrT-EN1v-39OA-6S2b-6eEI-wWlJJJ'.
+  Couldn't find all physical volumes for volume group xenvg.
+  Couldn't find device with uuid 'gg4cmC-4lrT-EN1v-39OA-6S2b-6eEI-wWlJJJ'.
+  Couldn't find device with uuid 'gg4cmC-4lrT-EN1v-39OA-6S2b-6eEI-wWlJJJ'.
+  Wrote out consistent volume group xenvg
diff --git a/test/data/vgreduce-removemissing-2.02.66-fail.txt b/test/data/vgreduce-removemissing-2.02.66-fail.txt
new file mode 100644
index 0000000..a2ca050
--- /dev/null
@@ -0,0 +1,34 @@
+  Couldn't find device with uuid bHRa26-svpL-ihJX-e0S4-2HNz-wAAi-AlBFtl.
+  WARNING: Partial LV 4ba7abfa-8459-43b6-b00f-c016244980f0.disk0 needs to be repaired or removed. 
+  WARNING: Partial LV e972960d-4e35-46b2-9cda-7029916b28c1.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV e972960d-4e35-46b2-9cda-7029916b28c1.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 4fa40b51-dd4d-4fd9-aef1-35cc3a0f1f11.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV 4fa40b51-dd4d-4fd9-aef1-35cc3a0f1f11.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 0a184b34-1270-4f1a-94df-86da2167cfee.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV 0a184b34-1270-4f1a-94df-86da2167cfee.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 7e49c8a9-9c65-4e76-810e-bd3d7a1d97a9.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV 7e49c8a9-9c65-4e76-810e-bd3d7a1d97a9.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 290a3fd4-c035-4fbe-9a18-f5a0889bd45d.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV 290a3fd4-c035-4fbe-9a18-f5a0889bd45d.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV c579be32-c041-4f1b-ae3e-c58aac9c2593.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV c579be32-c041-4f1b-ae3e-c58aac9c2593.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 47524563-3788-4a89-a61f-4274134dea73.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV 47524563-3788-4a89-a61f-4274134dea73.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV ede9f706-a0dc-4202-96f2-1728240bbf05.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV ede9f706-a0dc-4202-96f2-1728240bbf05.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 731d9f1b-3f2f-4860-85b3-217a36b9c48e.disk1_data needs to be repaired or removed. 
+  WARNING: Partial LV 731d9f1b-3f2f-4860-85b3-217a36b9c48e.disk1_meta needs to be repaired or removed. 
+  WARNING: Partial LV f449ccfd-4e6b-42d6-9a52-838371988ab5.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV f449ccfd-4e6b-42d6-9a52-838371988ab5.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 69bb4f61-fd0c-4c89-a57f-5285ae99b3bd.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV 9c29c24a-97ed-4fc7-b479-7a3385365a71.disk0 needs to be repaired or removed. 
+  WARNING: Partial LV a919d93e-0f51-4e4d-9018-e25ee7d5b36b.disk0 needs to be repaired or removed. 
+  WARNING: Partial LV d2501e6b-56a4-43b6-8856-471e5d49e892.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV d2501e6b-56a4-43b6-8856-471e5d49e892.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV 31a1f85a-ecc8-40c0-88aa-e694626906a3.disk0 needs to be repaired or removed. 
+  WARNING: Partial LV d124d70a-4776-4e00-bf0d-43511c29c534.disk0_data needs to be repaired or removed. 
+  WARNING: Partial LV d124d70a-4776-4e00-bf0d-43511c29c534.disk0_meta needs to be repaired or removed. 
+  WARNING: Partial LV f73b4499-34ec-4f70-a543-e43152a8644a.disk0 needs to be repaired or removed. 
+  WARNING: There are still partial LVs in VG xenvg.
+  To remove them unconditionally use: vgreduce --removemissing --force.
+  Proceeding to remove empty missing PVs.
diff --git a/test/data/vgreduce-removemissing-2.02.66-ok.txt b/test/data/vgreduce-removemissing-2.02.66-ok.txt
new file mode 100644
index 0000000..deb3ce2
--- /dev/null
@@ -0,0 +1,2 @@
+  Couldn't find device with uuid NzfYON-F7ky-1Szf-aGf1-v8Xa-Bt1W-8V3bou.
+  Wrote out consistent volume group xenvg
diff --git a/test/data/vgs-missing-pvs-2.02.02.txt b/test/data/vgs-missing-pvs-2.02.02.txt
new file mode 100644
index 0000000..2946bea
--- /dev/null
@@ -0,0 +1,5 @@
+  Couldn't find device with uuid 'gg4cmC-4lrT-EN1v-39OA-6S2b-6eEI-wWlJJJ'.
+  Couldn't find all physical volumes for volume group xenvg.
+  Couldn't find device with uuid 'gg4cmC-4lrT-EN1v-39OA-6S2b-6eEI-wWlJJJ'.
+  Couldn't find all physical volumes for volume group xenvg.
+  Volume group xenvg not found
diff --git a/test/data/vgs-missing-pvs-2.02.66.txt b/test/data/vgs-missing-pvs-2.02.66.txt
new file mode 100644
index 0000000..fc73047
--- /dev/null
@@ -0,0 +1,2 @@
+  Couldn't find device with uuid bHRa26-svpL-ihJX-e0S4-2HNz-wAAi-AlBFtl.
+  xenvg   2  52   0 wz-pn- 1.31t 1.07t
diff --git a/test/ganeti.storage_unittest.py b/test/ganeti.storage_unittest.py
new file mode 100755
index 0000000..add0743
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+#
+
+# Copyright (C) 2012 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script for testing ganeti.storage"""
+
+import re
+import unittest
+import random
+
+from ganeti import constants
+from ganeti import utils
+from ganeti import compat
+from ganeti import errors
+from ganeti import storage
+
+import testutils
+
+
+class TestVGReduce(testutils.GanetiTestCase):
+  VGNAME = "xenvg"
+  LIST_CMD = storage.LvmVgStorage.LIST_COMMAND
+  VGREDUCE_CMD = storage.LvmVgStorage.VGREDUCE_COMMAND
+
+  def _runCmd(self, cmd, **kwargs):
+    if not self.run_history:
+      self.fail("Empty run results")
+    exp_cmd, result = self.run_history.pop(0)
+    self.assertEqual(cmd, exp_cmd)
+    return result
+
+  def testOldVersion(self):
+    lvmvg = storage.LvmVgStorage()
+    stdout = self._ReadTestData("vgreduce-removemissing-2.02.02.txt")
+    vgs_fail = self._ReadTestData("vgs-missing-pvs-2.02.02.txt")
+    self.run_history = [
+      ([self.VGREDUCE_CMD, "--removemissing", self.VGNAME],
+       utils.RunResult(0, None, stdout, "", "", None, None)),
+      ([self.LIST_CMD, "--noheadings", "--nosuffix", self.VGNAME],
+       utils.RunResult(0, None, "", "", "", None, None)),
+      ]
+    lvmvg._RemoveMissing(self.VGNAME, _runcmd_fn=self._runCmd)
+    self.assertEqual(self.run_history, [])
+    for ecode, out in [(1, ""), (0, vgs_fail)]:
+      self.run_history = [
+        ([self.VGREDUCE_CMD, "--removemissing", self.VGNAME],
+         utils.RunResult(0, None, stdout, "", "", None, None)),
+        ([self.LIST_CMD, "--noheadings", "--nosuffix", self.VGNAME],
+         utils.RunResult(ecode, None, out, "", "", None, None)),
+        ]
+      self.assertRaises(errors.StorageError, lvmvg._RemoveMissing, self.VGNAME,
+                        _runcmd_fn=self._runCmd)
+      self.assertEqual(self.run_history, [])
+
+  def testNewVersion(self):
+    lvmvg = storage.LvmVgStorage()
+    stdout1 = self._ReadTestData("vgreduce-removemissing-2.02.66-fail.txt")
+    stdout2 = self._ReadTestData("vgreduce-removemissing-2.02.66-ok.txt")
+    vgs_fail = self._ReadTestData("vgs-missing-pvs-2.02.66.txt")
+    # first: require --force, check that it's used
+    self.run_history = [
+      ([self.VGREDUCE_CMD, "--removemissing", self.VGNAME],
+       utils.RunResult(0, None, stdout1, "", "", None, None)),
+      ([self.VGREDUCE_CMD, "--removemissing", "--force", self.VGNAME],
+       utils.RunResult(0, None, stdout2, "", "", None, None)),
+      ([self.LIST_CMD, "--noheadings", "--nosuffix", self.VGNAME],
+       utils.RunResult(0, None, "", "", "", None, None)),
+      ]
+    lvmvg._RemoveMissing(self.VGNAME, _runcmd_fn=self._runCmd)
+    self.assertEqual(self.run_history, [])
+    # second: make sure --force is not used if not needed
+    self.run_history = [
+      ([self.VGREDUCE_CMD, "--removemissing", self.VGNAME],
+       utils.RunResult(0, None, stdout2, "", "", None, None)),
+      ([self.LIST_CMD, "--noheadings", "--nosuffix", self.VGNAME],
+       utils.RunResult(0, None, "", "", "", None, None)),
+      ]
+    lvmvg._RemoveMissing(self.VGNAME, _runcmd_fn=self._runCmd)
+    self.assertEqual(self.run_history, [])
+    # third: make sure we error out if vgs doesn't find the volume
+    for ecode, out in [(1, ""), (0, vgs_fail)]:
+      self.run_history = [
+        ([self.VGREDUCE_CMD, "--removemissing", self.VGNAME],
+         utils.RunResult(0, None, stdout1, "", "", None, None)),
+        ([self.VGREDUCE_CMD, "--removemissing", "--force", self.VGNAME],
+         utils.RunResult(0, None, stdout2, "", "", None, None)),
+        ([self.LIST_CMD, "--noheadings", "--nosuffix", self.VGNAME],
+         utils.RunResult(ecode, None, out, "", "", None, None)),
+        ]
+      self.assertRaises(errors.StorageError, lvmvg._RemoveMissing, self.VGNAME,
+                        _runcmd_fn=self._runCmd)
+      self.assertEqual(self.run_history, [])
+
+
+if __name__ == "__main__":
+  testutils.GanetiTestProgram()
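
Taken together, the storage.py change and the test above hinge on the same seam: the command runner is an injectable parameter (_runcmd_fn), so the retry logic can be exercised without touching LVM. A condensed standalone paraphrase (hypothetical names, not ganeti.storage itself):

# Condensed standalone paraphrase (hypothetical names, not ganeti.storage) of
# the version-tolerant "vgreduce --removemissing" flow tested above.
class CmdResult(object):
  def __init__(self, failed, output):
    self.failed = failed
    self.output = output

def remove_missing(name, runcmd):
  result = runcmd(["vgreduce", "--removemissing", name])
  output = result.output
  if ("Wrote out consistent volume group" not in output or
      "vgreduce --removemissing --force" in output):
    # LVM >= 2.02.66 refuses to drop partial LVs without --force
    result = runcmd(["vgreduce", "--removemissing", "--force", name])
    output += "\n" + result.output
  result = runcmd(["vgs", "--noheadings", "--nosuffix", name])
  if result.failed or "Couldn't find device with uuid" in result.output:
    raise RuntimeError("VG %s still inconsistent: %r" % (name, output))

# A canned runner mimicking the old, already-consistent case:
ok = CmdResult(False, "  Wrote out consistent volume group xenvg\n")
remove_missing("xenvg", lambda cmd: ok)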