{-
-Copyright (C) 2009, 2010, 2011 Google Inc.
+Copyright (C) 2009, 2010, 2011, 2012 Google Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
+import qualified Ganeti.HTools.JSON as JSON
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
maxCpu :: Int
maxCpu = 1024
+-- | Null iPolicy, and by null we mean very liberal.
+nullIPolicy = Types.IPolicy
+ { Types.iPolicyMinSpec = Types.ISpec { Types.iSpecMemorySize = 0
+ , Types.iSpecCpuCount = 0
+ , Types.iSpecDiskSize = 0
+ , Types.iSpecDiskCount = 0
+ , Types.iSpecNicCount = 0
+ }
+ , Types.iPolicyMaxSpec = Types.ISpec { Types.iSpecMemorySize = maxBound
+ , Types.iSpecCpuCount = maxBound
+ , Types.iSpecDiskSize = maxBound
+ , Types.iSpecDiskCount = C.maxDisks
+ , Types.iSpecNicCount = C.maxNics
+ }
+ , Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
+ , Types.iSpecCpuCount = Types.unitCpu
+ , Types.iSpecDiskSize = Types.unitDsk
+ , Types.iSpecDiskCount = 1
+ , Types.iSpecNicCount = 1
+ }
+ , Types.iPolicyDiskTemplates = [Types.DTDrbd8, Types.DTPlain]
+ }
+
+
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
- Group.create "default" Utils.defaultGroupID Types.AllocPreferred
+ Group.create "default" Types.defaultGroupID Types.AllocPreferred
+ nullIPolicy
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]
show x ++ "' /= '" ++ show y ++ "'") (x == y)
infix 3 ==?
+-- | Show a message and fail the test.
+failTest :: String -> Property
+failTest msg = printTestCase msg False
+
-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode node inst =
inst { Instance.mem = Node.availMem node `div` 2
, Instance.vcpus = Node.availCpu node `div` 2
}
+-- | Check if an instance is smaller than a node.
+isInstanceSmallerThanNode node inst =
+ Instance.mem inst <= Node.availMem node `div` 2 &&
+ Instance.dsk inst <= Node.availDisk node `div` 2 &&
+ Instance.vcpus inst <= Node.availCpu node `div` 2
+
-- | Create an instance given its spec.
createInstance mem dsk vcpus =
Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
- let fn = Node.buildPeers node Container.empty
- namelst = map (\n -> (Node.name n, n)) (replicate count fn)
+ let origname = Node.name node
+ origalias = Node.alias node
+ nodes = map (\idx -> node { Node.name = origname ++ "-" ++ show idx
+ , Node.alias = origalias ++ "-" ++ show idx })
+ [1..count]
+ fn = flip Node.buildPeers Container.empty
+ namelst = map (\n -> (Node.name n, fn n)) nodes
(_, nlst) = Loader.assignIndices namelst
in nlst
+-- | Make a small cluster, both nodes and instances.
+makeSmallEmptyCluster :: Node.Node -> Int -> Instance.Instance
+ -> (Node.List, Instance.List, Instance.Instance)
+makeSmallEmptyCluster node count inst =
+ (makeSmallCluster node count, Container.empty,
+ setInstanceSmallerThanNode node inst)
+
-- | Checks if a node is "big" enough.
-isNodeBig :: Node.Node -> Int -> Bool
-isNodeBig node size = Node.availDisk node > size * Types.unitDsk
+isNodeBig :: Int -> Node.Node -> Bool
+isNodeBig size node = Node.availDisk node > size * Types.unitDsk
&& Node.availMem node > size * Types.unitMem
&& Node.availCpu node > size * Types.unitCpu
let frest' = map (map dnsGetChar) frest
return (felem ++ "." ++ intercalate "." frest')
+-- | Defines a tag type.
+newtype TagChar = TagChar { tagGetChar :: Char }
+
+-- | All valid tag chars. This doesn't need to match _exactly_
+-- Ganeti's own tag regex, just enough for it to be close.
+tagChar :: [Char]
+tagChar = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] ++ ".+*/:@-"
+
+instance Arbitrary TagChar where
+ arbitrary = do
+ c <- elements tagChar
+ return (TagChar c)
+
+-- | Generates a tag.
+genTag :: Gen [TagChar]
+genTag = do
+ -- the correct value would be C.maxTagLen, but that's way too
+ -- verbose in unittests, and at the moment I don't see any possible
+ -- bugs with longer tags and the way we use tags in htools
+ n <- choose (1, 10)
+ vector n
+
+-- | Generates a list of tags (correctly upper bounded).
+genTags :: Gen [String]
+genTags = do
+ -- the correct value would be C.maxTagsPerObj, but per the comment
+ -- in genTag, we don't use tags enough in htools to warrant testing
+ -- such big values
+ n <- choose (0, 10::Int)
+ tags <- mapM (const genTag) [1..n]
+ return $ map (map tagGetChar) tags
+
instance Arbitrary Types.InstanceStatus where
arbitrary = elements [minBound..maxBound]
offl <- arbitrary
let n = Node.create name (fromIntegral mem_t) mem_n mem_f
(fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
- return $ Node.buildPeers n Container.empty
+ n' = Node.setPolicy nullIPolicy n
+ return $ Node.buildPeers n' Container.empty
+
+-- | Helper function to generate a sane node.
+genOnlineNode :: Gen Node.Node
+genOnlineNode = do
+ arbitrary `suchThat` (\n -> not (Node.offline n) &&
+ not (Node.failN1 n) &&
+ Node.availDisk n > 0 &&
+ Node.availMem n > 0 &&
+ Node.availCpu n > 0)
-- and a random node
instance Arbitrary Node.Node where
, "OP_INSTANCE_FAILOVER"
, "OP_INSTANCE_MIGRATE"
]
- (case op_id of
- "OP_TEST_DELAY" ->
- liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
- "OP_INSTANCE_REPLACE_DISKS" ->
- liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
- arbitrary arbitrary arbitrary
- "OP_INSTANCE_FAILOVER" ->
- liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
- arbitrary
- "OP_INSTANCE_MIGRATE" ->
- liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
- arbitrary arbitrary arbitrary
- _ -> fail "Wrong opcode")
+ case op_id of
+ "OP_TEST_DELAY" ->
+ liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
+ "OP_INSTANCE_REPLACE_DISKS" ->
+ liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
+ arbitrary arbitrary arbitrary
+ "OP_INSTANCE_FAILOVER" ->
+ liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
+ arbitrary
+ "OP_INSTANCE_MIGRATE" ->
+ liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
+ arbitrary arbitrary arbitrary
+ _ -> fail "Wrong opcode"
instance Arbitrary Jobs.OpStatus where
arbitrary = elements [minBound..maxBound]
instance Arbitrary Types.FailMode where
arbitrary = elements [minBound..maxBound]
+instance Arbitrary Types.EvacMode where
+ arbitrary = elements [minBound..maxBound]
+
instance Arbitrary a => Arbitrary (Types.OpResult a) where
arbitrary = arbitrary >>= \c ->
- case c of
- False -> liftM Types.OpFail arbitrary
- True -> liftM Types.OpGood arbitrary
+ if c
+ then liftM Types.OpGood arbitrary
+ else liftM Types.OpFail arbitrary
+
+instance Arbitrary Types.ISpec where
+ arbitrary = do
+ mem <- arbitrary::Gen (NonNegative Int)
+ dsk_c <- arbitrary::Gen (NonNegative Int)
+ dsk_s <- arbitrary::Gen (NonNegative Int)
+ cpu <- arbitrary::Gen (NonNegative Int)
+ nic <- arbitrary::Gen (NonNegative Int)
+ return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem
+ , Types.iSpecCpuCount = fromIntegral cpu
+ , Types.iSpecDiskSize = fromIntegral dsk_s
+ , Types.iSpecDiskCount = fromIntegral dsk_c
+ , Types.iSpecNicCount = fromIntegral nic
+ }
+
+-- | Helper function to check whether a spec is less than or equal to another.
+iSpecSmaller :: Types.ISpec -> Types.ISpec -> Bool
+iSpecSmaller imin imax =
+ Types.iSpecMemorySize imin <= Types.iSpecMemorySize imax &&
+ Types.iSpecCpuCount imin <= Types.iSpecCpuCount imax &&
+ Types.iSpecDiskSize imin <= Types.iSpecDiskSize imax &&
+ Types.iSpecDiskCount imin <= Types.iSpecDiskCount imax &&
+ Types.iSpecNicCount imin <= Types.iSpecNicCount imax
+
+instance Arbitrary Types.IPolicy where
+ arbitrary = do
+ imin <- arbitrary
+ istd <- arbitrary `suchThat` (iSpecSmaller imin)
+ imax <- arbitrary `suchThat` (iSpecSmaller istd)
+ dts <- arbitrary
+ return Types.IPolicy { Types.iPolicyMinSpec = imin
+ , Types.iPolicyStdSpec = istd
+ , Types.iPolicyMaxSpec = imax
+ , Types.iPolicyDiskTemplates = dts
+ }
-- * Actual tests
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
forAll (arbitrary `suchThat`
- (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
+ (\l -> l /= [""] && all (notElem ',') l )) $ \lst ->
Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst
-- | Split and join should always be idempotent.
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
-- a missing key will be returned with the default
- Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
+ JSON.fromObjWithDefault [] random_key def_value == Just def_value &&
-- a found key will be returned as is, not with default
- Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
+ JSON.fromObjWithDefault [(random_key, J.showJSON def_value)]
random_key (def_value+1) == Just def_value
where _types = def_value :: Integer
-> [Int] -- ^ List of True values
-> Gen Prop -- ^ Test result
prop_Utils_select def lst1 lst2 =
- Utils.select def cndlist ==? expectedresult
- where expectedresult = Utils.if' (null lst2) def (head lst2)
- flist = map (\e -> (False, e)) lst1
- tlist = map (\e -> (True, e)) lst2
- cndlist = flist ++ tlist
+ Utils.select def (flist ++ tlist) ==? expectedresult
+ where expectedresult = Utils.if' (null lst2) def (head lst2)
+ flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
-- | Test basic select functionality with undefined default
prop_Utils_select_undefd :: [Int] -- ^ List of False values
-> NonEmptyList Int -- ^ List of True values
-> Gen Prop -- ^ Test result
prop_Utils_select_undefd lst1 (NonEmpty lst2) =
- Utils.select undefined cndlist ==? head lst2
- where flist = map (\e -> (False, e)) lst1
- tlist = map (\e -> (True, e)) lst2
- cndlist = flist ++ tlist
+ Utils.select undefined (flist ++ tlist) ==? head lst2
+ where flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
-- | Test basic select functionality with undefined list values
prop_Utils_select_undefv :: [Int] -- ^ List of False values
-> Gen Prop -- ^ Test result
prop_Utils_select_undefv lst1 (NonEmpty lst2) =
Utils.select undefined cndlist ==? head lst2
- where flist = map (\e -> (False, e)) lst1
- tlist = map (\e -> (True, e)) lst2
- cndlist = flist ++ tlist ++ [undefined]
+ where flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
+ cndlist = flist ++ tlist ++ [undefined]
prop_Utils_parseUnit (NonNegative n) =
Utils.parseUnit (show n) == Types.Ok n &&
-- ** Container tests
+-- we silence the following due to hlint bug fixed in later versions
+{-# ANN prop_Container_addTwo "HLint: ignore Avoid lambda" #-}
prop_Container_addTwo cdata i1 i2 =
fn i1 i2 cont == fn i2 i1 cont &&
fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
forAll (vector cnt) $ \ names ->
(length . nub) (map fst names ++ map snd names) ==
length names * 2 &&
- not (othername `elem` (map fst names ++ map snd names)) ==>
+ othername `notElem` (map fst names ++ map snd names) ==>
let nl = makeSmallCluster node cnt
nodes = Container.elems nl
nodes' = map (\((name, alias), nn) -> (Node.idx nn,
target = snd (nodes' !! fidx)
in Container.findByName nl' (Node.name target) == Just target &&
Container.findByName nl' (Node.alias target) == Just target &&
- Container.findByName nl' othername == Nothing
+ isNothing (Container.findByName nl' othername)
testSuite "Container"
[ 'prop_Container_addTwo
, snode::String
, autobal::Bool)
in case inst of
- Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
- False
+ Types.Bad msg -> failTest $ "Failed to load instance: " ++ msg
Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
\ loading the instance" $
Instance.name i == name &&
prop_Text_Load_InstanceFail ktn fields =
length fields /= 10 ==>
case Text.loadInst nl fields of
- Types.Ok _ -> printTestCase "Managed to load instance from invalid\
- \ data" False
+ Types.Ok _ -> failTest "Managed to load instance from invalid data"
Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
"Invalid/incomplete instance data: '" `isPrefixOf` msg
where nl = Data.Map.fromList ktn
Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
Just (Node.name n, n)
-- override failN1 to what loadNode returns by default
- where n = node { Node.failN1 = True, Node.offline = False }
+ where n = node { Node.failN1 = True, Node.offline = False
+ , Node.iPolicy = Types.defIPolicy }
+
+prop_Text_ISpecIdempotent ispec =
+ case Text.loadISpec "dummy" . Utils.sepSplit ',' .
+ Text.serializeISpec $ ispec of
+ Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
+ Types.Ok ispec' -> ispec ==? ispec'
+
+prop_Text_IPolicyIdempotent ipol =
+ case Text.loadIPolicy . Utils.sepSplit '|' $
+ Text.serializeIPolicy owner ipol of
+ Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
+ Types.Ok res -> (owner, ipol) ==? res
+ where owner = "dummy"
+
+-- | This property, while being in the text tests, does more than just
+-- test end-to-end the serialisation and loading back workflow; it
+-- also tests the Loader.mergeData and the actual
+-- Cluster.iterateAlloc (for well-behaved handling of instance
+-- allocations, not for the business logic). As such, it's quite a
+-- complex and slow test, and that's the reason we restrict it to
+-- small cluster sizes.
+prop_Text_CreateSerialise =
+ forAll genTags $ \ctags ->
+ forAll (choose (1, 2)) $ \reqnodes ->
+ forAll (choose (1, 20)) $ \maxiter ->
+ forAll (choose (2, 10)) $ \count ->
+ forAll genOnlineNode $ \node ->
+ forAll (arbitrary `suchThat` isInstanceSmallerThanNode node) $ \inst ->
+ let inst' = Instance.setMovable inst $ Utils.if' (reqnodes == 2) True False
+ nl = makeSmallCluster node count
+ in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
+ Cluster.iterateAlloc nl Container.empty (Just maxiter) inst' allocn [] []
+ of
+ Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
+ Types.Ok (_, _, _, [], _) -> printTestCase
+ "Failed to allocate: no allocations" False
+ Types.Ok (_, nl', il', _, _) ->
+ let cdata = Loader.ClusterData defGroupList nl' il' ctags
+ Types.defIPolicy
+ saved = Text.serializeCluster cdata
+ in case Text.parseData saved >>= Loader.mergeData [] [] [] [] of
+ Types.Bad msg -> failTest $ "Failed to load/merge: " ++ msg
+ Types.Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
+ ctags ==? ctags2 .&&.
+ Types.defIPolicy ==? cpol2 .&&.
+ il' ==? il2 .&&.
+ defGroupList ==? gl2 .&&.
+ nl' ==? nl2
testSuite "Text"
[ 'prop_Text_Load_Instance
, 'prop_Text_Load_Node
, 'prop_Text_Load_NodeFail
, 'prop_Text_NodeLSIdempotent
+ , 'prop_Text_ISpecIdempotent
+ , 'prop_Text_IPolicyIdempotent
+ , 'prop_Text_CreateSerialise
]
-- ** Node tests
-- this is not related to rMem, but as good a place to
-- test as any
inst_idx `elem` Node.sList a_ab &&
- not (inst_idx `elem` Node.sList d_ab)
- x -> printTestCase ("Failed to add/remove instances: " ++ show x) False
+ inst_idx `notElem` Node.sList d_ab
+ x -> failTest $ "Failed to add/remove instances: " ++ show x
-- | Check mdsk setting.
prop_Node_setMdsk node mx =
in score <= 1e-12
-- | Check that cluster stats are sane.
-prop_CStats_sane node =
+prop_CStats_sane =
forAll (choose (1, 1024)) $ \count ->
- (not (Node.offline node) && not (Node.failN1 node) &&
- (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
+ forAll genOnlineNode $ \node ->
let fn = Node.buildPeers node Container.empty
nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
nl = Container.fromList nlst
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
-prop_ClusterAlloc_sane node inst =
+prop_ClusterAlloc_sane inst =
forAll (choose (5, 20)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && Node.availDisk node > 0
- && Node.availMem node > 0
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
+ forAll genOnlineNode $ \node ->
+ let (nl, il, inst') = makeSmallEmptyCluster node count inst
in case Cluster.genAllocNodes defGroupList nl 2 True >>=
Cluster.tryAlloc nl il inst' of
Types.Bad _ -> False
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
-prop_ClusterCanTieredAlloc node inst =
+prop_ClusterCanTieredAlloc inst =
forAll (choose (2, 5)) $ \count ->
forAll (choose (1, 2)) $ \rqnodes ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
+ forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
let nl = makeSmallCluster node count
il = Container.empty
allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
-prop_ClusterAllocEvac node inst =
+prop_ClusterAllocEvac inst =
forAll (choose (4, 8)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
+ forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
+ let (nl, il, inst') = makeSmallEmptyCluster node count inst
in case Cluster.genAllocNodes defGroupList nl 2 True >>=
Cluster.tryAlloc nl il inst' of
Types.Bad _ -> False
i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
in case allocnodes >>= \allocnodes' ->
Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
- Types.Bad _ -> False
+ Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
+ Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no instances"
Types.Ok (_, xnl, il', _, _) ->
let ynl = Container.add (Node.idx hnode) hnode xnl
cv = Cluster.compCV ynl
tbl = Cluster.Table ynl il' cv []
- in canBalance tbl True True False
+ in printTestCase "Failed to rebalance" $
+ canBalance tbl True True False
-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
(Container.elems nl'')) gni
+-- | Helper function to check if we can allocate an instance on a
+-- given node list.
+canAllocOn :: Node.List -> Int -> Instance.Instance -> Bool
+canAllocOn nl reqnodes inst =
+ case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
+ Cluster.tryAlloc nl (Container.empty) inst of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just _ -> True
+
+-- | Checks that allocation obeys minimum and maximum instance
+-- policies. The unittest generates a random node, duplicates it count
+-- times, and generates a random instance that can be allocated on
+-- this mini-cluster; it then checks that, after applying a policy
+-- under which the instance does not fit, the allocation fails.
+prop_ClusterAllocPolicy node =
+ -- rqn is the required nodes (1 or 2)
+ forAll (choose (1, 2)) $ \rqn ->
+ forAll (choose (5, 20)) $ \count ->
+ forAll (arbitrary `suchThat` (canAllocOn (makeSmallCluster node count) rqn))
+ $ \inst ->
+ forAll (arbitrary `suchThat` (isFailure .
+ Instance.instMatchesPolicy inst)) $ \ipol ->
+ let node' = Node.setPolicy ipol node
+ nl = makeSmallCluster node' count
+ in not $ canAllocOn nl rqn inst
+
testSuite "Cluster"
[ 'prop_Score_Zero
, 'prop_CStats_sane
, 'prop_ClusterAllocBalance
, 'prop_ClusterCheckConsistency
, 'prop_ClusterSplitCluster
+ , 'prop_ClusterAllocPolicy
]
-- ** OpCodes tests
-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
case J.readJSON (J.showJSON op) of
- J.Error e -> printTestCase ("Cannot deserialise: " ++ e) False
+ J.Error e -> failTest $ "Cannot deserialise: " ++ e
J.Ok op' -> op ==? op'
where _types = op::OpCodes.OpCode
-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
case J.readJSON (J.showJSON os) of
- J.Error e -> printTestCase ("Cannot deserialise: " ++ e) False
+ J.Error e -> failTest $ "Cannot deserialise: " ++ e
J.Ok os' -> os ==? os'
where _types = os::Jobs.OpStatus
prop_JobStatus_serialization js =
case J.readJSON (J.showJSON js) of
- J.Error e -> printTestCase ("Cannot deserialise: " ++ e) False
+ J.Error e -> failTest $ "Cannot deserialise: " ++ e
J.Ok js' -> js ==? js'
where _types = js::Jobs.JobStatus
in case Loader.mergeData [] [] [] []
(Loader.emptyCluster {Loader.cdNodes = na}) of
Types.Bad _ -> False
- Types.Ok (Loader.ClusterData _ nl il _) ->
+ Types.Ok (Loader.ClusterData _ nl il _ _) ->
let nodes = Container.elems nl
instances = Container.elems il
in (sum . map (length . Node.pList)) nodes == 0 &&
prop_Types_AllocPolicy_serialisation apol =
case J.readJSON (J.showJSON apol) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == apol
- J.Error s -> printTestCase ("failed to deserialise: " ++ s) False
+ J.Ok p -> p ==? apol
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
where _types = apol::Types.AllocPolicy
prop_Types_DiskTemplate_serialisation dt =
case J.readJSON (J.showJSON dt) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == dt
- J.Error s -> printTestCase ("failed to deserialise: " ++ s)
- False
+ J.Ok p -> p ==? dt
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
where _types = dt::Types.DiskTemplate
+prop_Types_ISpec_serialisation ispec =
+ case J.readJSON (J.showJSON ispec) of
+ J.Ok p -> p ==? ispec
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = ispec::Types.ISpec
+
+prop_Types_IPolicy_serialisation ipol =
+ case J.readJSON (J.showJSON ipol) of
+ J.Ok p -> p ==? ipol
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = ipol::Types.IPolicy
+
+prop_Types_EvacMode_serialisation em =
+ case J.readJSON (J.showJSON em) of
+ J.Ok p -> p ==? em
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = em::Types.EvacMode
+
prop_Types_opToResult op =
case op of
Types.OpFail _ -> Types.isBad r
testSuite "Types"
[ 'prop_Types_AllocPolicy_serialisation
, 'prop_Types_DiskTemplate_serialisation
+ , 'prop_Types_ISpec_serialisation
+ , 'prop_Types_IPolicy_serialisation
+ , 'prop_Types_EvacMode_serialisation
, 'prop_Types_opToResult
, 'prop_Types_eitherToResult
]