-{-| Unittests for ganeti-htools
+{-| Unittests for ganeti-htools.
-}
, testJobs
, testCluster
, testLoader
+ , testTypes
) where
import Test.QuickCheck
-- * Constants
--- | Maximum memory (1TiB, somewhat random value)
+-- | Maximum memory (1TiB, somewhat random value).
maxMem :: Int
maxMem = 1024 * 1024
--- | Maximum disk (8TiB, somewhat random value)
+-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8
--- | Max CPUs (1024, somewhat random value)
+-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024
-- * Helper functions
--- | Simple checker for whether OpResult is fail or pass
+-- | Simple checker for whether OpResult is fail or pass.
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False
--- | Update an instance to be smaller than a node
+-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode node inst =
inst { Instance.mem = Node.availMem node `div` 2
, Instance.dsk = Node.availDisk node `div` 2
, Instance.vcpus = Node.availCpu node `div` 2
}
--- | Create an instance given its spec
+-- | Create an instance given its spec.
createInstance mem dsk vcpus =
Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
--- | Create a small cluster by repeating a node spec
+-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
let fn = Node.buildPeers node Container.empty
(_, nlst) = Loader.assignIndices namelst
in nlst
--- | Checks if a node is "big" enough
+-- | Checks if a node is "big" enough.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
&& Node.availMem node > size * Types.unitMem
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0
-- | Assigns a new fresh instance to a cluster; this is not
--- allocation, so no resource checks are done
+-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
Types.Idx -> Types.Idx ->
(Node.List, Instance.List)
-- * Arbitrary instances
+-- | Defines a DNS name.
newtype DNSChar = DNSChar { dnsGetChar::Char }
+
instance Arbitrary DNSChar where
arbitrary = do
x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
vcpus <- choose (0, maxCpu)
return $ Instance.create name mem dsk vcpus run_st [] True pn sn
+-- | Generates an arbitrary node based on sizing information.
+genNode :: Maybe Int -- ^ Minimum node size in terms of units
+ -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
+ -- just by the max... constants)
+ -> Gen Node.Node
+genNode min_multiplier max_multiplier = do
+ let (base_mem, base_dsk, base_cpu) =
+ case min_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (0, 0, 0)
+ (top_mem, top_dsk, top_cpu) =
+ case max_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (maxMem, maxDsk, maxCpu)
+ name <- getFQDN
+ mem_t <- choose (base_mem, top_mem)
+ mem_f <- choose (base_mem, mem_t)
+ mem_n <- choose (0, mem_t - mem_f)
+ dsk_t <- choose (base_dsk, top_dsk)
+ dsk_f <- choose (base_dsk, dsk_t)
+ cpu_t <- choose (base_cpu, top_cpu)
+ offl <- arbitrary
+ let n = Node.create name (fromIntegral mem_t) mem_n mem_f
+ (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
+ return $ Node.buildPeers n Container.empty
+
-- and a random node
instance Arbitrary Node.Node where
- arbitrary = do
- name <- getFQDN
- mem_t <- choose (0, maxMem)
- mem_f <- choose (0, mem_t)
- mem_n <- choose (0, mem_t - mem_f)
- dsk_t <- choose (0, maxDsk)
- dsk_f <- choose (0, dsk_t)
- cpu_t <- choose (0, maxCpu)
- offl <- arbitrary
- let n = Node.create name (fromIntegral mem_t) mem_n mem_f
- (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
- 0
- n' = Node.buildPeers n Container.empty
- return n'
+ arbitrary = genNode Nothing Nothing
-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
instance Arbitrary Jobs.JobStatus where
arbitrary = elements [minBound..maxBound]
+newtype SmallRatio = SmallRatio Double deriving Show
+instance Arbitrary SmallRatio where
+ arbitrary = do
+ v <- choose (0, 1)
+ return $ SmallRatio v
+
+instance Arbitrary Types.AllocPolicy where
+ arbitrary = elements [minBound..maxBound]
+
+instance Arbitrary Types.DiskTemplate where
+ arbitrary = elements [minBound..maxBound]
+
-- * Actual tests
--- If the list is not just an empty element, and if the elements do
--- not contain commas, then join+split should be idepotent
-prop_Utils_commaJoinSplit lst = lst /= [""] &&
- all (not . elem ',') lst ==>
- Utils.sepSplit ',' (Utils.commaJoin lst) == lst
--- Split and join should always be idempotent
+-- ** Utils tests
+
+-- | If the list is not just an empty element, and if the elements do
+-- not contain commas, then join+split should be idempotent.
+prop_Utils_commaJoinSplit =
+ forAll (arbitrary `suchThat`
+ (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
+ Utils.sepSplit ',' (Utils.commaJoin lst) == lst
+
+-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s
-- | fromObjWithDefault, we test using the Maybe monad and an integer
--- value
+-- value.
prop_Utils_fromObjWithDefault def_value random_key =
-- a missing key will be returned with the default
Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
random_key (def_value+1) == Just def_value
where _types = def_value :: Integer
+-- | Test list for the Utils module.
testUtils =
[ run prop_Utils_commaJoinSplit
, run prop_Utils_commaSplitJoin
, run prop_Utils_fromObjWithDefault
]
--- | Make sure add is idempotent
+-- ** PeerMap tests
+
+-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
fn puniq == fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap,
fn = PeerMap.add key em
puniq = PeerMap.accumArray const pmap
--- | Make sure remove is idempotent
+-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
fn puniq == fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
fn = PeerMap.remove key
puniq = PeerMap.accumArray const pmap
--- | Make sure a missing item returns 0
+-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
PeerMap.find key (PeerMap.remove key puniq) == 0
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
puniq = PeerMap.accumArray const pmap
--- | Make sure an added item is found
+-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
PeerMap.find key (PeerMap.add key em puniq) == em
where _types = (pmap::PeerMap.PeerMap,
key::PeerMap.Key, em::PeerMap.Elem)
puniq = PeerMap.accumArray const pmap
--- | Manual check that maxElem returns the maximum indeed, or 0 for null
+-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
PeerMap.maxElem puniq == if null puniq then 0
else (maximum . snd . unzip) puniq
where _types = pmap::PeerMap.PeerMap
puniq = PeerMap.accumArray const pmap
+-- | List of tests for the PeerMap module.
testPeerMap =
[ run prop_PeerMap_addIdempotent
, run prop_PeerMap_removeIdempotent
, run prop_PeerMap_findMissing
]
--- Container tests
+-- ** Container tests
prop_Container_addTwo cdata i1 i2 =
fn i1 i2 cont == fn i2 i1 cont &&
fnode = head (Container.elems nl)
in Container.nameOf nl (Node.idx fnode) == Node.name fnode
--- We test that in a cluster, given a random node, we can find it by
+-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
--- and that we fail to find a non-existing name
+-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
forAll (choose (1, 20)) $ \ cnt ->
forAll (choose (0, cnt - 1)) $ \ fidx ->
, run prop_Container_findByName
]
+-- ** Instance tests
+
-- Simple instance tests, we only have setter/getters
prop_Instance_creat inst =
where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
si = Instance.setBoth inst pdx sdx
-prop_Instance_runStatus_True inst =
- let run_st = Instance.running inst
- run_tx = Instance.runSt inst
- in
- run_tx `elem` Instance.runningStates ==> run_st
+prop_Instance_runStatus_True =
+ forAll (arbitrary `suchThat`
+ ((`elem` Instance.runningStates) . Instance.runSt))
+ Instance.running
prop_Instance_runStatus_False inst =
let run_st = Instance.running inst
_ -> False
prop_Instance_shrinkMF inst =
- Instance.mem inst < 2 * Types.unitMem ==>
- Types.isBad $ Instance.shrinkByType inst Types.FailMem
+ forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
+ let inst' = inst { Instance.mem = mem}
+ in Types.isBad $ Instance.shrinkByType inst' Types.FailMem
prop_Instance_shrinkCG inst =
Instance.vcpus inst >= 2 * Types.unitCpu ==>
_ -> False
prop_Instance_shrinkCF inst =
- Instance.vcpus inst < 2 * Types.unitCpu ==>
- Types.isBad $ Instance.shrinkByType inst Types.FailCPU
+ forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
+ let inst' = inst { Instance.vcpus = vcpus }
+ in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU
prop_Instance_shrinkDG inst =
Instance.dsk inst >= 2 * Types.unitDsk ==>
_ -> False
prop_Instance_shrinkDF inst =
- Instance.dsk inst < 2 * Types.unitDsk ==>
- Types.isBad $ Instance.shrinkByType inst Types.FailDisk
+ forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
+ let inst' = inst { Instance.dsk = dsk }
+ in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk
prop_Instance_setMovable inst m =
Instance.movable inst' == m
, run prop_Instance_setMovable
]
+-- ** Text backend tests
+
-- Instance text loader tests
-prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx autobal =
- not (null pnode) && pdx >= 0 && sdx >= 0 ==>
+prop_Text_Load_Instance name mem dsk vcpus status
+ (NonEmpty pnode) snode
+ (NonNegative pdx) (NonNegative sdx) autobal =
+ pnode /= snode && pdx /= sdx ==>
let vcpus_s = show vcpus
dsk_s = show dsk
mem_s = show mem
- rsdx = if pdx == sdx
- then sdx + 1
- else sdx
ndx = if null snode
then [(pnode, pdx)]
- else [(pnode, pdx), (snode, rsdx)]
+ else [(pnode, pdx), (snode, sdx)]
nl = Data.Map.fromList ndx
tags = ""
sbal = if autobal then "Y" else "N"
sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
_types = ( name::String, mem::Int, dsk::Int
, vcpus::Int, status::String
- , pnode::String, snode::String
- , pdx::Types.Ndx, sdx::Types.Ndx
+ , snode::String
, autobal::Bool)
in
case inst of
Instance.pNode i == pdx &&
Instance.sNode i == (if null snode
then Node.noSecondary
- else rsdx) &&
- Instance.auto_balance i == autobal &&
+ else sdx) &&
+ Instance.autoBalance i == autobal &&
isNothing fail1
prop_Text_Load_InstanceFail ktn fields =
, run prop_Text_NodeLSIdempotent
]
--- Node tests
+-- ** Node tests
prop_Node_setAlias node name =
Node.name newnode == Node.name node &&
Node.mCpu newnode == mc
where newnode = Node.setMcpu node mc
--- | Check that an instance add with too high memory or disk will be rejected
+-- | Check that an instance add with too high memory or disk will be
+-- rejected.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
not (Node.failN1 node)
==>
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.dsk = Instance.dsk inst }
-prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
- not (Node.failN1 node)
- ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailCPU -> True
- _ -> False
+prop_Node_addPriFC node inst (Positive extra) =
+ not (Node.failN1 node) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailCPU -> True
+ _ -> False
where _types = (node::Node.Node, inst::Instance.Instance)
inst' = setInstanceSmallerThanNode node inst
- inst'' = inst' { Instance.vcpus = Instance.vcpus inst }
+ inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
--- | Check that an instance add with too high memory or disk will be rejected
+-- | Check that an instance add with too high memory or disk will be
+-- rejected.
prop_Node_addSec node inst pdx =
(Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
Instance.dsk inst >= Node.fDsk node) &&
==> isFailure (Node.addSec node inst pdx)
where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
--- | Checks for memory reservation changes
-prop_Node_rMem node inst =
+-- | Checks for memory reservation changes.
+prop_Node_rMem inst =
+ forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
-- ab = auto_balance, nb = non-auto_balance
-- we use -1 as the primary node of the instance
- let inst' = inst { Instance.pNode = -1, Instance.auto_balance = True }
+ let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
inst_ab = setInstanceSmallerThanNode node inst'
- inst_nb = inst_ab { Instance.auto_balance = False }
+ inst_nb = inst_ab { Instance.autoBalance = False }
-- now we have the two instances, identical except the
- -- auto_balance attribute
+ -- autoBalance attribute
orig_rmem = Node.rMem node
inst_idx = Instance.idx inst_ab
node_add_ab = Node.addSec node inst_ab (-1)
in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
(Types.OpGood a_ab, Types.OpGood a_nb,
Types.OpGood d_ab, Types.OpGood d_nb) ->
+ printTestCase "Consistency checks failed" $
Node.rMem a_ab > orig_rmem &&
Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
Node.rMem a_nb == orig_rmem &&
-- test as any
inst_idx `elem` Node.sList a_ab &&
not (inst_idx `elem` Node.sList d_ab)
- _ -> False
-
-newtype SmallRatio = SmallRatio Double deriving Show
-instance Arbitrary SmallRatio where
- arbitrary = do
- v <- choose (0, 1)
- return $ SmallRatio v
+ x -> printTestCase ("Failed to add/remove instances: " ++ show x)
+ False
--- | Check mdsk setting
+-- | Check mdsk setting.
prop_Node_setMdsk node mx =
Node.loDsk node' >= 0 &&
fromIntegral (Node.loDsk node') <= Node.tDsk node &&
]
--- Cluster tests
+-- ** Cluster tests
--- | Check that the cluster score is close to zero for a homogeneous cluster
+-- | Check that the cluster score is close to zero for a homogeneous
+-- cluster.
prop_Score_Zero node =
forAll (choose (1, 1024)) $ \count ->
(not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
-- this should be much lower than the default score in CLI.hs
in score <= 1e-12
--- | Check that cluster stats are sane
+-- | Check that cluster stats are sane.
prop_CStats_sane node =
forAll (choose (1, 1024)) $ \count ->
(not (Node.offline node) && not (Node.failN1 node) &&
Cluster.csAdsk cstats <= Cluster.csFdsk cstats
-- | Check that one instance is allocated correctly, without
--- rebalances needed
+-- rebalances needed.
prop_ClusterAlloc_sane node inst =
forAll (choose (5, 20)) $ \count ->
not (Node.offline node)
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
--- spec), on either one or two nodes
+-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
forAll (choose (2, 5)) $ \count ->
forAll (choose (1, 2)) $ \rqnodes ->
length ixes == length cstats
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
--- we can also evacuate it
+-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
forAll (choose (4, 8)) $ \count ->
not (Node.offline node)
_ -> False
-- | Check that allocating multiple instances on a cluster, then
--- adding an empty node, results in a valid rebalance
-prop_ClusterAllocBalance node =
+-- adding an empty node, results in a valid rebalance.
+prop_ClusterAllocBalance =
+ forAll (genNode (Just 5) (Just 128)) $ \node ->
forAll (choose (3, 5)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- && not (isNodeBig node 8)
- ==>
+ not (Node.offline node) && not (Node.failN1 node) ==>
let nl = makeSmallCluster node count
(hnode, nl') = IntMap.deleteFindMax nl
il = Container.empty
tbl = Cluster.Table ynl il' cv []
in canBalance tbl True True False
--- | Checks consistency
+-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
let nl = makeSmallCluster node 3
[node1, node2, node3] = Container.elems nl
null (ccheck [(0, inst2)]) &&
(not . null $ ccheck [(0, inst3)])
--- For now, we only test that we don't lose instances during the split
+-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
forAll (choose (0, 100)) $ \icnt ->
let nl = makeSmallCluster node 2
, run prop_ClusterSplitCluster
]
--- | Check that opcode serialization is idempotent
+-- ** OpCodes tests
+-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
case J.readJSON (J.showJSON op) of
J.Error _ -> False
[ run prop_OpCodes_serialization
]
--- | Check that (queued) job\/opcode status serialization is idempotent
+-- ** Jobs tests
+
+-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
case J.readJSON (J.showJSON os) of
J.Error _ -> False
, run prop_JobStatus_serialization
]
--- | Loader tests
+-- ** Loader tests
prop_Loader_lookupNode ktn inst node =
Loader.lookupNode nl inst node == Data.Map.lookup node nl
else True)
where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-
-- | Checks that the number of primary instances recorded on the nodes
--- is zero
+-- is zero.
prop_Loader_mergeData ns =
let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
in case Loader.mergeData [] [] [] []
, run prop_Loader_assignIndices
, run prop_Loader_mergeData
]
+
+-- ** Types tests
+
+prop_AllocPolicy_serialisation apol =
+ case Types.apolFromString (Types.apolToString apol) of
+ Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
+ p == apol
+ Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
+
+prop_DiskTemplate_serialisation dt =
+ case Types.dtFromString (Types.dtToString dt) of
+ Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
+ p == dt
+ Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
+
+testTypes =
+ [ run prop_AllocPolicy_serialisation
+ , run prop_DiskTemplate_serialisation
+ ]