show x ++ "' /= '" ++ show y ++ "'") (x == y)
infix 3 ==?
-- | Fail the test, showing the given message on failure.
failTest :: String -> Property
failTest = flip printTestCase False
+
-- | Update an instance to be smaller than a node.
--
-- Caps memory and vcpus at half of the node's availability.
-- NOTE(review): disk is not adjusted here — confirm the instance's
-- disk is constrained elsewhere before relying on this helper alone.
setInstanceSmallerThanNode node inst =
  inst { Instance.mem = Node.availMem node `div` 2
       , Instance.vcpus = Node.availCpu node `div` 2
       }
-- | Check if an instance is smaller than a node.
--
-- This is the predicate dual of 'setInstanceSmallerThanNode': it
-- accepts exactly the instances whose memory, disk and vcpus are at
-- most half of the node's availability.
-- (FIX: added the missing top-level type signature, matching the
-- convention used by the sibling helpers.)
isInstanceSmallerThanNode :: Node.Node -> Instance.Instance -> Bool
isInstanceSmallerThanNode node inst =
  Instance.mem inst <= Node.availMem node `div` 2 &&
  Instance.dsk inst <= Node.availDisk node `div` 2 &&
  Instance.vcpus inst <= Node.availCpu node `div` 2
+
-- | Create an instance given its spec.
--
-- The name is fixed ("inst-unnamed") and the node indices are left
-- unset (-1), so callers are expected to assign placement themselves.
createInstance mem dsk vcpus =
  Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
-- | Create a small cluster by repeating a node spec; each copy gets
-- a unique name and alias (the original one plus a "-<idx>" suffix),
-- peers are built per node and indices are assigned densely.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
  let mkCopy idx = node { Node.name = Node.name node ++ "-" ++ show idx
                        , Node.alias = Node.alias node ++ "-" ++ show idx
                        }
      entry n = (Node.name n, Node.buildPeers n Container.empty)
      namelst = [ entry (mkCopy idx) | idx <- [1..count] ]
  in snd (Loader.assignIndices namelst)
setInstanceSmallerThanNode node inst)
-- | Checks if a node is "big" enough, i.e. has more than @size@
-- allocation units of disk, memory and cpu available.
isNodeBig :: Int -> Node.Node -> Bool
isNodeBig size node =
  and [ Node.availDisk node > size * Types.unitDsk
      , Node.availMem node > size * Types.unitMem
      , Node.availCpu node > size * Types.unitCpu
      ]
il' = Container.add maxiidx inst' il
in (nl', il')
-- | Generates a list of a given size with non-duplicate elements.
--
-- Each step rejects candidates already present, so generation is
-- quadratic in @cnt@; fine for the small sizes used in these tests.
genUniquesList :: (Eq a, Arbitrary a) => Int -> Gen [a]
genUniquesList cnt = go cnt []
  where
    go n acc
      | n <= 0 = return acc
      | otherwise = do
          fresh <- arbitrary `suchThat` (`notElem` acc)
          go (n - 1) (fresh:acc)
+
-- * Arbitrary instances
-- | Defines a DNS name.
x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
return (DNSChar x)
-- | Generates a single name component (1 to 64 valid DNS characters).
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len::Gen [DNSChar]
  return $ map dnsGetChar chars
-- | Generates an entire FQDN: 1 to 4 name components joined by dots.
getFQDN :: Gen String
getFQDN = do
  ncomps <- choose (1, 4)
  comps <- mapM (const getName) [1..ncomps::Int]
  return (intercalate "." comps)
+
-- | Defines a tag type: a single character from the tag alphabet.
newtype TagChar = TagChar { tagGetChar :: Char }

-- | All valid tag chars. This doesn't need to match _exactly_
-- Ganeti's own tag regex, just enough for it to be close.
tagChar :: [Char]
tagChar = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] ++ ".+*/:@-"

instance Arbitrary TagChar where
  arbitrary = liftM TagChar (elements tagChar)
+
-- | Generates a tag.
genTag :: Gen [TagChar]
genTag =
  -- the correct value would be C.maxTagLen, but that's way too
  -- verbose in unittests, and at the moment I don't see any possible
  -- bugs with longer tags and the way we use tags in htools
  choose (1, 10) >>= vector
+
-- | Generates a list of tags (correctly upper bounded).
genTags :: Gen [String]
genTags = do
  -- the correct value would be C.maxTagsPerObj, but per the comment
  -- in genTag, we don't use tags enough in htools to warrant testing
  -- such big values
  n <- choose (0, 10::Int)
  tags <- mapM (const genTag) [1..n]
  return [ map tagGetChar tag | tag <- tags ]
instance Arbitrary Types.InstanceStatus where
arbitrary = elements [minBound..maxBound]
n' = Node.setPolicy nullIPolicy n
return $ Node.buildPeers n' Container.empty
-- | Helper function to generate a sane node: online, not failing
-- N+1, and with strictly positive free disk, memory and cpu.
genOnlineNode :: Gen Node.Node
genOnlineNode = arbitrary `suchThat` isSane
  where isSane n = not (Node.offline n) &&
                   not (Node.failN1 n) &&
                   Node.availDisk n > 0 &&
                   Node.availMem n > 0 &&
                   Node.availCpu n > 0
+
-- and a random node
instance Arbitrary Node.Node where
arbitrary = genNode Nothing Nothing
instance Arbitrary Types.FailMode where
arbitrary = elements [minBound..maxBound]
-- | Evacuation modes form a bounded enumeration; any constructor is
-- a valid value.
instance Arbitrary Types.EvacMode where
  arbitrary = elements [minBound..maxBound]
instance Arbitrary a => Arbitrary (Types.OpResult a) where
arbitrary = arbitrary >>= \c ->
if c
then liftM Types.OpGood arbitrary
else liftM Types.OpFail arbitrary
-- | Generates an instance spec with non-negative resource values.
instance Arbitrary Types.ISpec where
  arbitrary = do
    -- every resource is drawn as a NonNegative Int and converted via
    -- fromIntegral into the field's target numeric type
    mem <- arbitrary::Gen (NonNegative Int)
    dsk_c <- arbitrary::Gen (NonNegative Int)
    dsk_s <- arbitrary::Gen (NonNegative Int)
    cpu <- arbitrary::Gen (NonNegative Int)
    nic <- arbitrary::Gen (NonNegative Int)
    return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem
                       , Types.iSpecCpuCount = fromIntegral cpu
                       , Types.iSpecDiskSize = fromIntegral dsk_s
                       , Types.iSpecDiskCount = fromIntegral dsk_c
                       , Types.iSpecNicCount = fromIntegral nic
                       }
+
-- | Helper function to check whether a spec is LTE than another, in
-- every resource dimension.
iSpecSmaller :: Types.ISpec -> Types.ISpec -> Bool
iSpecSmaller imin imax =
  and [ Types.iSpecMemorySize imin <= Types.iSpecMemorySize imax
      , Types.iSpecCpuCount imin <= Types.iSpecCpuCount imax
      , Types.iSpecDiskSize imin <= Types.iSpecDiskSize imax
      , Types.iSpecDiskCount imin <= Types.iSpecDiskCount imax
      , Types.iSpecNicCount imin <= Types.iSpecNicCount imax
      ]
+
-- | Generates a policy as three monotonically growing specs
-- (min <= std <= max) plus an arbitrary disk template list.
instance Arbitrary Types.IPolicy where
  arbitrary = do
    imin <- arbitrary
    -- std must dominate min, and max must dominate std
    istd <- arbitrary `suchThat` (iSpecSmaller imin)
    imax <- arbitrary `suchThat` (iSpecSmaller istd)
    dts <- arbitrary
    return Types.IPolicy { Types.iPolicyMinSpec = imin
                         , Types.iPolicyStdSpec = istd
                         , Types.iPolicyMaxSpec = imax
                         , Types.iPolicyDiskTemplates = dts
                         }
+
-- * Actual tests
-- ** Utils tests
-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
-prop_Container_findByName node othername =
+prop_Container_findByName node =
forAll (choose (1, 20)) $ \ cnt ->
forAll (choose (0, cnt - 1)) $ \ fidx ->
- forAll (vector cnt) $ \ names ->
- (length . nub) (map fst names ++ map snd names) ==
- length names * 2 &&
- othername `notElem` (map fst names ++ map snd names) ==>
- let nl = makeSmallCluster node cnt
+ forAll (genUniquesList (cnt * 2)) $ \ allnames ->
+ forAll (arbitrary `suchThat` (`notElem` allnames)) $ \ othername ->
+ let names = zip (take cnt allnames) (drop cnt allnames)
+ nl = makeSmallCluster node cnt
nodes = Container.elems nl
nodes' = map (\((name, alias), nn) -> (Node.idx nn,
nn { Node.name = name,
, snode::String
, autobal::Bool)
in case inst of
- Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
- False
+ Types.Bad msg -> failTest $ "Failed to load instance: " ++ msg
Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
\ loading the instance" $
Instance.name i == name &&
-- | Checks that loading an instance from a field list of the wrong
-- length (anything but 10 fields) fails with the expected error
-- message prefix.
prop_Text_Load_InstanceFail ktn fields =
  length fields /= 10 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> failTest "Managed to load instance from invalid data"
      Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                       "Invalid/incomplete instance data: '" `isPrefixOf` msg
  where nl = Data.Map.fromList ktn
where n = node { Node.failN1 = True, Node.offline = False
, Node.iPolicy = Types.defIPolicy }
-- | Check that an ISpec survives a serialise/deserialise round-trip.
prop_Text_ISpecIdempotent ispec =
  case reloaded of
    Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
    Types.Ok ispec' -> ispec ==? ispec'
  where reloaded = Text.loadISpec "dummy" . Utils.sepSplit ',' .
                   Text.serializeISpec $ ispec
+
-- | Check that an IPolicy (together with its owner) survives a
-- serialise/deserialise round-trip.
prop_Text_IPolicyIdempotent ipol =
  case Text.loadIPolicy . Utils.sepSplit '|' $
       Text.serializeIPolicy owner ipol of
    -- FIX: the failure message said "ispec" (copy-paste from
    -- prop_Text_ISpecIdempotent); it now names the right entity
    Types.Bad msg -> failTest $ "Failed to load ipolicy: " ++ msg
    Types.Ok res -> (owner, ipol) ==? res
  where owner = "dummy"
+
-- | This property, while being in the text tests, does more than just
-- test end-to-end the serialisation and loading back workflow; it
-- also tests the Loader.mergeData and the actual
-- Cluster.iterateAlloc (for well-behaving w.r.t. instance
-- allocations, not for the business logic). As such, it's a quite
-- complex and slow test, and that's the reason we restrict it to
-- small cluster sizes.
prop_Text_CreateSerialise =
  forAll genTags $ \ctags ->
  forAll (choose (1, 2)) $ \reqnodes ->
  forAll (choose (1, 20)) $ \maxiter ->
  forAll (choose (2, 10)) $ \count ->
  forAll genOnlineNode $ \node ->
  forAll (arbitrary `suchThat` isInstanceSmallerThanNode node) $ \inst ->
  -- instances are movable exactly when placed on two nodes
  -- (FIX: 'Utils.if' cond True False' is just 'cond')
  let inst' = Instance.setMovable inst (reqnodes == 2)
      nl = makeSmallCluster node count
  in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
     Cluster.iterateAlloc nl Container.empty (Just maxiter) inst' allocn [] []
     of
       Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
       -- FIX: use failTest here too, consistent with the other branches
       Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no allocations"
       Types.Ok (_, nl', il', _, _) ->
         let cdata = Loader.ClusterData defGroupList nl' il' ctags
                     Types.defIPolicy
             saved = Text.serializeCluster cdata
         in case Text.parseData saved >>= Loader.mergeData [] [] [] [] of
              Types.Bad msg -> failTest $ "Failed to load/merge: " ++ msg
              Types.Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
                ctags ==? ctags2 .&&.
                Types.defIPolicy ==? cpol2 .&&.
                il' ==? il2 .&&.
                defGroupList ==? gl2 .&&.
                nl' ==? nl2
+
testSuite "Text"
[ 'prop_Text_Load_Instance
, 'prop_Text_Load_InstanceFail
, 'prop_Text_Load_Node
, 'prop_Text_Load_NodeFail
, 'prop_Text_NodeLSIdempotent
+ , 'prop_Text_ISpecIdempotent
+ , 'prop_Text_IPolicyIdempotent
+ , 'prop_Text_CreateSerialise
]
-- ** Node tests
_ -> False
-- | Checks that adding an (admin-)offline secondary instance always
-- succeeds on a sane node, as long as the instance's disk fits.
prop_Node_addSecOffline pdx =
  forAll genOnlineNode $ \node ->
  forAll (arbitrary `suchThat` fitsDisk node) $ \inst ->
  let offlineInst = inst { Instance.runSt = Types.AdminOffline }
  in case Node.addSec node offlineInst pdx of
       Types.OpGood _ -> True
       _ -> False
  where fitsDisk node inst = Instance.dsk inst < Node.availDisk node
-- test as any
inst_idx `elem` Node.sList a_ab &&
inst_idx `notElem` Node.sList d_ab
- x -> printTestCase ("Failed to add/remove instances: " ++ show x) False
+ x -> failTest $ "Failed to add/remove instances: " ++ show x
-- | Check mdsk setting.
prop_Node_setMdsk node mx =
in score <= 1e-12
-- | Check that cluster stats are sane.
-prop_CStats_sane node =
+prop_CStats_sane =
forAll (choose (1, 1024)) $ \count ->
- (not (Node.offline node) && not (Node.failN1 node) &&
- (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
+ forAll genOnlineNode $ \node ->
let fn = Node.buildPeers node Container.empty
nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
nl = Container.fromList nlst
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
-prop_ClusterAlloc_sane node inst =
+prop_ClusterAlloc_sane inst =
forAll (choose (5, 20)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && Node.availDisk node > 0
- && Node.availMem node > 0
- ==>
+ forAll genOnlineNode $ \node ->
let (nl, il, inst') = makeSmallEmptyCluster node count inst
in case Cluster.genAllocNodes defGroupList nl 2 True >>=
Cluster.tryAlloc nl il inst' of
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
-prop_ClusterCanTieredAlloc node inst =
+prop_ClusterCanTieredAlloc inst =
forAll (choose (2, 5)) $ \count ->
forAll (choose (1, 2)) $ \rqnodes ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
+ forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
let nl = makeSmallCluster node count
il = Container.empty
allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
IntMap.size il' == length ixes &&
length ixes == length cstats
--- | Checks that on a 4-8 node cluster, once we allocate an instance,
--- we can also evacuate it.
-prop_ClusterAllocEvac node inst =
- forAll (choose (4, 8)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let (nl, il, inst') = makeSmallEmptyCluster node count inst
-- | Helper function to create a cluster with the given range of nodes
-- and allocate an instance on it.
--
-- Returns the post-allocation node list, an instance container
-- holding just the allocated instance, and the instance itself (with
-- its placement filled in), or a 'Types.Bad' with a short reason.
genClusterAlloc count node inst =
  let nl = makeSmallCluster node count
  in case Cluster.genAllocNodes defGroupList nl 2 True >>=
     Cluster.tryAlloc nl Container.empty inst of
       Types.Bad _ -> Types.Bad "Can't allocate"
       Types.Ok as ->
         case Cluster.asSolution as of
           Nothing -> Types.Bad "Empty solution?"
           Just (xnl, xi, _, _) ->
             -- a container with only the newly-placed instance
             let xil = Container.add (Instance.idx xi) xi Container.empty
             in Types.Ok (xnl, xil, xi)
+
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also relocate it.
prop_ClusterAllocRelocate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (arbitrary `suchThat` (isInstanceSmallerThanNode node)) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- NOTE(review): this passes 'Instance.idx inst' (the
      -- pre-allocation instance) but 'Instance.sNode inst'' (the
      -- allocated one); confirm both indices agree, otherwise inst'
      -- should probably be used for both arguments
      case IAlloc.processRelocate defGroupList nl il
             (Instance.idx inst) 1 [Instance.sNode inst'] of
        Types.Ok _ -> printTestCase "??" True -- huh, how to make
                                              -- this nicer...
        Types.Bad msg -> failTest $ "Failed to relocate: " ++ msg
+
-- | Helper property checker for the result of a nodeEvac or
-- changeGroup operation: the operation must succeed with no failed
-- relocations, emit opcodes, and move exactly our instance into the
-- expected group.
check_EvacMode grp inst result =
  case result of
    -- FIX: the message was missing the space after the colon
    Types.Bad msg -> failTest $ "Couldn't evacuate/change group: " ++ msg
    Types.Ok (_, _, es) ->
      let moved = Cluster.esMoved es
          failed = Cluster.esFailed es
          opcodes = not . null $ Cluster.esOpCodes es
      in failmsg ("'failed' not empty: " ++ show failed) (null failed) .&&.
         failmsg "'opcodes' is null" opcodes .&&.
         case moved of
           [(idx', gdx, _)] -> failmsg "invalid instance moved" (idx == idx')
                               .&&.
                               failmsg "wrong target group"
                                 (gdx == Group.idx grp)
           v -> failmsg ("invalid solution: " ++ show v) False
  -- FIX: eta-reduced the '\msg ->' lambda
  where failmsg msg = printTestCase ("Failed to evacuate: " ++ msg)
        idx = Instance.idx inst
+
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also node-evacuate it, under every evacuation mode.
prop_ClusterAllocEvacuate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (arbitrary `suchThat` (isInstanceSmallerThanNode node)) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- each EvacMode value must yield a successful evacuation
      conjoin $ map (\mode -> check_EvacMode defGroup inst' $
                              Cluster.tryNodeEvac defGroupList nl il mode
                                [Instance.idx inst']) [minBound..maxBound]
+
-- | Checks that on a 4-8 node cluster with two node groups, once we
-- allocate an instance on the first node group, we can also change
-- its group.
prop_ClusterAllocChangeGroup =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (arbitrary `suchThat` (isInstanceSmallerThanNode node)) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- we need to add a second node group and nodes to the cluster
      let nl2 = Container.elems $ makeSmallCluster node count
          grp2 = Group.setIdx defGroup (Group.idx defGroup + 1)
          maxndx = maximum . map Node.idx $ nl2
          -- NOTE(review): shifting by '+ maxndx' makes the lowest new
          -- index collide with the highest existing one (the
          -- left-biased IntMap.union below then drops that node);
          -- '+ maxndx + 1' would keep all 'count' nodes — confirm
          -- whether losing one node here is intended
          nl3 = map (\n -> n { Node.group = Group.idx grp2
                             , Node.idx = Node.idx n + maxndx }) nl2
          nl4 = Container.fromList . map (\n -> (Node.idx n, n)) $ nl3
          gl' = Container.add (Group.idx grp2) grp2 defGroupList
          nl' = IntMap.union nl nl4
      in check_EvacMode grp2 inst' $
         Cluster.tryChangeGroup gl' nl' il [] [Instance.idx inst']
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
in case allocnodes >>= \allocnodes' ->
Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
- Types.Bad _ -> printTestCase "Failed to allocate" False
- Types.Ok (_, _, _, [], _) -> printTestCase "Failed to allocate" False
+ Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
+ Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no instances"
Types.Ok (_, xnl, il', _, _) ->
let ynl = Container.add (Node.idx hnode) hnode xnl
cv = Cluster.compCV ynl
all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
(Container.elems nl'')) gni
-- | Helper function to check if we can allocate an instance on a
-- given node list.
canAllocOn :: Node.List -> Int -> Instance.Instance -> Bool
canAllocOn nl reqnodes inst =
  case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
       Cluster.tryAlloc nl Container.empty inst of
    Types.Bad _ -> False
    Types.Ok as -> maybe False (const True) (Cluster.asSolution as)
+
-- | Checks that allocation obeys minimum and maximum instance
-- policies. The unittest generates a random node, duplicates it count
-- times, and generates a random instance that can be allocated on
-- this mini-cluster; it then checks that after applying a policy that
-- the instance doesn't fit, the allocation fails.
prop_ClusterAllocPolicy node =
  -- rqn is the required nodes (1 or 2)
  forAll (choose (1, 2)) $ \rqn ->
  forAll (choose (5, 20)) $ \count ->
  -- an instance that does fit on the unmodified cluster...
  forAll (arbitrary `suchThat` (canAllocOn (makeSmallCluster node count) rqn))
    $ \inst ->
  -- ...and a policy which that instance violates
  forAll (arbitrary `suchThat` (isFailure .
                                Instance.instMatchesPolicy inst)) $ \ipol ->
  let node' = Node.setPolicy ipol node
      nl = makeSmallCluster node' count
  in not $ canAllocOn nl rqn inst
+
testSuite "Cluster"
[ 'prop_Score_Zero
, 'prop_CStats_sane
, 'prop_ClusterAlloc_sane
, 'prop_ClusterCanTieredAlloc
- , 'prop_ClusterAllocEvac
+ , 'prop_ClusterAllocRelocate
+ , 'prop_ClusterAllocEvacuate
+ , 'prop_ClusterAllocChangeGroup
, 'prop_ClusterAllocBalance
, 'prop_ClusterCheckConsistency
, 'prop_ClusterSplitCluster
+ , 'prop_ClusterAllocPolicy
]
-- ** OpCodes tests
-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok op' -> op ==? op'
  where _types = op::OpCodes.OpCode

-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok os' -> os ==? os'
  where _types = os::Jobs.OpStatus

-- | Check that job status serialization is idempotent.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok js' -> js ==? js'
  where _types = js::Jobs.JobStatus
Loader.lookupInstance il inst ==? Data.Map.lookup inst il
where il = Data.Map.fromList kti
-prop_Loader_assignIndices nodes =
- Data.Map.size nassoc == length nodes &&
- Container.size kt == length nodes &&
- (if not (null nodes)
- then maximum (IntMap.keys kt) == length nodes - 1
- else True)
- where (nassoc, kt) =
- Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-- | Checks that assigning indices to a list of uniquely-named nodes
-- produces a dense 0-based index set covering every node.
prop_Loader_assignIndices =
  -- generate nodes with unique names
  forAll (arbitrary `suchThat`
          (\nodes ->
             let names = map Node.name nodes
             in length names == length (nub names))) $ \nodes ->
  let (nassoc, kt) =
        Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
  in Data.Map.size nassoc == length nodes &&
     Container.size kt == length nodes &&
     -- FIX: 'if c then x else True' simplified to 'not c || x'
     (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Types_AllocPolicy_serialisation apol =
case J.readJSON (J.showJSON apol) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == apol
- J.Error s -> printTestCase ("failed to deserialise: " ++ s) False
+ J.Ok p -> p ==? apol
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
where _types = apol::Types.AllocPolicy
prop_Types_DiskTemplate_serialisation dt =
case J.readJSON (J.showJSON dt) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == dt
- J.Error s -> printTestCase ("failed to deserialise: " ++ s)
- False
+ J.Ok p -> p ==? dt
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
where _types = dt::Types.DiskTemplate
-- | Check that ISpec JSON serialisation is idempotent.
prop_Types_ISpec_serialisation ispec =
  case J.readJSON (J.showJSON ispec) of
    J.Ok p -> p ==? ispec
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = ispec::Types.ISpec

-- | Check that IPolicy JSON serialisation is idempotent.
prop_Types_IPolicy_serialisation ipol =
  case J.readJSON (J.showJSON ipol) of
    J.Ok p -> p ==? ipol
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = ipol::Types.IPolicy

-- | Check that EvacMode JSON serialisation is idempotent.
prop_Types_EvacMode_serialisation em =
  case J.readJSON (J.showJSON em) of
    J.Ok p -> p ==? em
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = em::Types.EvacMode
+
prop_Types_opToResult op =
case op of
Types.OpFail _ -> Types.isBad r
testSuite "Types"
[ 'prop_Types_AllocPolicy_serialisation
, 'prop_Types_DiskTemplate_serialisation
+ , 'prop_Types_ISpec_serialisation
+ , 'prop_Types_IPolicy_serialisation
+ , 'prop_Types_EvacMode_serialisation
, 'prop_Types_opToResult
, 'prop_Types_eitherToResult
]