-}
module Ganeti.HTools.QC
- ( testUtils
- , testPeerMap
- , testContainer
- , testInstance
- , testNode
- , testText
- , testOpCodes
- , testJobs
- , testCluster
- , testLoader
- , testTypes
- ) where
+ ( testUtils
+ , testPeerMap
+ , testContainer
+ , testInstance
+ , testNode
+ , testText
+ , testOpCodes
+ , testJobs
+ , testCluster
+ , testLoader
+ , testTypes
+ ) where
import Test.QuickCheck
import Data.List (findIndex, intercalate, nub, isPrefixOf)
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
- Group.create "default" Utils.defaultGroupID
- Types.AllocPreferred
+ Group.create "default" Utils.defaultGroupID Types.AllocPreferred
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]
-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode node inst =
- inst { Instance.mem = Node.availMem node `div` 2
- , Instance.dsk = Node.availDisk node `div` 2
- , Instance.vcpus = Node.availCpu node `div` 2
- }
+ inst { Instance.mem = Node.availMem node `div` 2
+ , Instance.dsk = Node.availDisk node `div` 2
+ , Instance.vcpus = Node.availCpu node `div` 2
+ }
-- | Create an instance given its spec.
createInstance mem dsk vcpus =
- Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
- Types.DTDrbd8
+ Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
+ Types.DTDrbd8
-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
- let fn = Node.buildPeers node Container.empty
- namelst = map (\n -> (Node.name n, n)) (replicate count fn)
- (_, nlst) = Loader.assignIndices namelst
- in nlst
+ let fn = Node.buildPeers node Container.empty
+ namelst = map (\n -> (Node.name n, n)) (replicate count fn)
+ (_, nlst) = Loader.assignIndices namelst
+ in nlst
-- | Checks if a node is "big" enough.
isNodeBig :: Node.Node -> Int -> Bool
let pnode = Container.find pdx nl
snode = Container.find sdx nl
maxiidx = if Container.null il
- then 0
- else fst (Container.findMax il) + 1
+ then 0
+ else fst (Container.findMax il) + 1
inst' = inst { Instance.idx = maxiidx,
Instance.pNode = pdx, Instance.sNode = sdx }
pnode' = Node.setPri pnode inst'
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
- arbitrary = do
- x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
- return (DNSChar x)
+ arbitrary = do
+ x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
+ return (DNSChar x)
getName :: Gen String
getName = do
dn <- vector n::Gen [DNSChar]
return (map dnsGetChar dn)
-
getFQDN :: Gen String
getFQDN = do
felem <- getName
-- let's generate a random instance
instance Arbitrary Instance.Instance where
- arbitrary = do
- name <- getFQDN
- mem <- choose (0, maxMem)
- dsk <- choose (0, maxDsk)
- run_st <- arbitrary
- pn <- arbitrary
- sn <- arbitrary
- vcpus <- choose (0, maxCpu)
- return $ Instance.create name mem dsk vcpus run_st [] True pn sn
- Types.DTDrbd8
+ arbitrary = do
+ name <- getFQDN
+ mem <- choose (0, maxMem)
+ dsk <- choose (0, maxDsk)
+ run_st <- arbitrary
+ pn <- arbitrary
+ sn <- arbitrary
+ vcpus <- choose (0, maxCpu)
+ return $ Instance.create name mem dsk vcpus run_st [] True pn sn
+ Types.DTDrbd8
-- | Generas an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
-> Gen Node.Node
genNode min_multiplier max_multiplier = do
let (base_mem, base_dsk, base_cpu) =
- case min_multiplier of
- Just mm -> (mm * Types.unitMem,
- mm * Types.unitDsk,
- mm * Types.unitCpu)
- Nothing -> (0, 0, 0)
+ case min_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (0, 0, 0)
(top_mem, top_dsk, top_cpu) =
- case max_multiplier of
- Just mm -> (mm * Types.unitMem,
- mm * Types.unitDsk,
- mm * Types.unitCpu)
- Nothing -> (maxMem, maxDsk, maxCpu)
+ case max_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (maxMem, maxDsk, maxCpu)
name <- getFQDN
mem_t <- choose (base_mem, top_mem)
mem_f <- choose (base_mem, mem_t)
-- and a random node
instance Arbitrary Node.Node where
- arbitrary = genNode Nothing Nothing
+ arbitrary = genNode Nothing Nothing
-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
, "OP_INSTANCE_MIGRATE"
]
(case op_id of
- "OP_TEST_DELAY" ->
- liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
- "OP_INSTANCE_REPLACE_DISKS" ->
- liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
- arbitrary arbitrary arbitrary
- "OP_INSTANCE_FAILOVER" ->
- liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
- arbitrary
- "OP_INSTANCE_MIGRATE" ->
- liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
- arbitrary arbitrary
- arbitrary
- _ -> fail "Wrong opcode")
+ "OP_TEST_DELAY" ->
+ liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
+ "OP_INSTANCE_REPLACE_DISKS" ->
+ liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
+ arbitrary arbitrary arbitrary
+ "OP_INSTANCE_FAILOVER" ->
+ liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
+ arbitrary
+ "OP_INSTANCE_MIGRATE" ->
+ liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
+ arbitrary arbitrary arbitrary
+ _ -> fail "Wrong opcode")
instance Arbitrary Jobs.OpStatus where
arbitrary = elements [minBound..maxBound]
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
- arbitrary = do
- v <- choose (0, 1)
- return $ SmallRatio v
+ arbitrary = do
+ v <- choose (0, 1)
+ return $ SmallRatio v
instance Arbitrary Types.AllocPolicy where
arbitrary = elements [minBound..maxBound]
arbitrary = elements [minBound..maxBound]
instance Arbitrary Types.FailMode where
- arbitrary = elements [minBound..maxBound]
+ arbitrary = elements [minBound..maxBound]
instance Arbitrary a => Arbitrary (Types.OpResult a) where
- arbitrary = arbitrary >>= \c ->
- case c of
- False -> liftM Types.OpFail arbitrary
- True -> liftM Types.OpGood arbitrary
+ arbitrary = arbitrary >>= \c ->
+ case c of
+ False -> liftM Types.OpFail arbitrary
+ True -> liftM Types.OpGood arbitrary
-- * Actual tests
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
- forAll (arbitrary `suchThat`
- (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
- Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst
+ forAll (arbitrary `suchThat`
+ (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
+ Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst
-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin s =
- Utils.commaJoin (Utils.sepSplit ',' s) ==? s
+ Utils.commaJoin (Utils.sepSplit ',' s) ==? s
-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
- -- a missing key will be returned with the default
- Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
- -- a found key will be returned as is, not with default
- Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
- random_key (def_value+1) == Just def_value
- where _types = def_value :: Integer
+ -- a missing key will be returned with the default
+ Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
+ -- a found key will be returned as is, not with default
+ Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
+ random_key (def_value+1) == Just def_value
+ where _types = def_value :: Integer
-- | Test that functional if' behaves like the syntactic sugar if.
prop_Utils_if'if :: Bool -> Int -> Int -> Gen Prop
prop_Utils_if'if cnd a b =
- Utils.if' cnd a b ==? if cnd then a else b
+ Utils.if' cnd a b ==? if cnd then a else b
-- | Test basic select functionality
prop_Utils_select :: Int -- ^ Default result
cndlist = flist ++ tlist ++ [undefined]
prop_Utils_parseUnit (NonNegative n) =
- Utils.parseUnit (show n) == Types.Ok n &&
- Utils.parseUnit (show n ++ "m") == Types.Ok n &&
- (case Utils.parseUnit (show n ++ "M") of
- Types.Ok m -> if n > 0
- then m < n -- for positive values, X MB is less than X MiB
- else m == 0 -- but for 0, 0 MB == 0 MiB
- Types.Bad _ -> False) &&
- Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
- Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
- Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
+ Utils.parseUnit (show n) == Types.Ok n &&
+ Utils.parseUnit (show n ++ "m") == Types.Ok n &&
+ (case Utils.parseUnit (show n ++ "M") of
+ Types.Ok m -> if n > 0
+ then m < n -- for positive values, X MB is < X MiB
+ else m == 0 -- but for 0, 0 MB == 0 MiB
+ Types.Bad _ -> False) &&
+ Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
+ Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
+ Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
where _types = n::Int
-- | Test list for the Utils module.
testSuite "Utils"
- [ 'prop_Utils_commaJoinSplit
- , 'prop_Utils_commaSplitJoin
- , 'prop_Utils_fromObjWithDefault
- , 'prop_Utils_if'if
- , 'prop_Utils_select
- , 'prop_Utils_select_undefd
- , 'prop_Utils_select_undefv
- , 'prop_Utils_parseUnit
- ]
+ [ 'prop_Utils_commaJoinSplit
+ , 'prop_Utils_commaSplitJoin
+ , 'prop_Utils_fromObjWithDefault
+ , 'prop_Utils_if'if
+ , 'prop_Utils_select
+ , 'prop_Utils_select_undefd
+ , 'prop_Utils_select_undefv
+ , 'prop_Utils_parseUnit
+ ]
-- ** PeerMap tests
-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
- fn puniq ==? fn (fn puniq)
+ fn puniq ==? fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap,
key::PeerMap.Key, em::PeerMap.Elem)
fn = PeerMap.add key em
-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
- fn puniq ==? fn (fn puniq)
+ fn puniq ==? fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
fn = PeerMap.remove key
puniq = PeerMap.accumArray const pmap
-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
- PeerMap.find key (PeerMap.remove key puniq) ==? 0
+ PeerMap.find key (PeerMap.remove key puniq) ==? 0
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
puniq = PeerMap.accumArray const pmap
-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
- PeerMap.find key (PeerMap.add key em puniq) ==? em
+ PeerMap.find key (PeerMap.add key em puniq) ==? em
where _types = (pmap::PeerMap.PeerMap,
key::PeerMap.Key, em::PeerMap.Elem)
puniq = PeerMap.accumArray const pmap
-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
- PeerMap.maxElem puniq ==? if null puniq then 0
+ PeerMap.maxElem puniq ==? if null puniq then 0
else (maximum . snd . unzip) puniq
where _types = pmap::PeerMap.PeerMap
puniq = PeerMap.accumArray const pmap
-- | List of tests for the PeerMap module.
testSuite "PeerMap"
- [ 'prop_PeerMap_addIdempotent
- , 'prop_PeerMap_removeIdempotent
- , 'prop_PeerMap_maxElem
- , 'prop_PeerMap_addFind
- , 'prop_PeerMap_findMissing
- ]
+ [ 'prop_PeerMap_addIdempotent
+ , 'prop_PeerMap_removeIdempotent
+ , 'prop_PeerMap_maxElem
+ , 'prop_PeerMap_addFind
+ , 'prop_PeerMap_findMissing
+ ]
-- ** Container tests
prop_Container_addTwo cdata i1 i2 =
- fn i1 i2 cont == fn i2 i1 cont &&
- fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
+ fn i1 i2 cont == fn i2 i1 cont &&
+ fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
where _types = (cdata::[Int],
i1::Int, i2::Int)
cont = foldl (\c x -> Container.add x x c) Container.empty cdata
Container.findByName nl' othername == Nothing
testSuite "Container"
- [ 'prop_Container_addTwo
- , 'prop_Container_nameOf
- , 'prop_Container_findByName
- ]
+ [ 'prop_Container_addTwo
+ , 'prop_Container_nameOf
+ , 'prop_Container_findByName
+ ]
-- ** Instance tests
-- Simple instance tests, we only have setter/getters
prop_Instance_creat inst =
- Instance.name inst ==? Instance.alias inst
+ Instance.name inst ==? Instance.alias inst
prop_Instance_setIdx inst idx =
- Instance.idx (Instance.setIdx inst idx) ==? idx
+ Instance.idx (Instance.setIdx inst idx) ==? idx
where _types = (inst::Instance.Instance, idx::Types.Idx)
prop_Instance_setName inst name =
- Instance.name newinst == name &&
- Instance.alias newinst == name
+ Instance.name newinst == name &&
+ Instance.alias newinst == name
where _types = (inst::Instance.Instance, name::String)
newinst = Instance.setName inst name
prop_Instance_setAlias inst name =
- Instance.name newinst == Instance.name inst &&
- Instance.alias newinst == name
+ Instance.name newinst == Instance.name inst &&
+ Instance.alias newinst == name
where _types = (inst::Instance.Instance, name::String)
newinst = Instance.setAlias inst name
prop_Instance_setPri inst pdx =
- Instance.pNode (Instance.setPri inst pdx) ==? pdx
+ Instance.pNode (Instance.setPri inst pdx) ==? pdx
where _types = (inst::Instance.Instance, pdx::Types.Ndx)
prop_Instance_setSec inst sdx =
- Instance.sNode (Instance.setSec inst sdx) ==? sdx
+ Instance.sNode (Instance.setSec inst sdx) ==? sdx
where _types = (inst::Instance.Instance, sdx::Types.Ndx)
prop_Instance_setBoth inst pdx sdx =
- Instance.pNode si == pdx && Instance.sNode si == sdx
+ Instance.pNode si == pdx && Instance.sNode si == sdx
where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
si = Instance.setBoth inst pdx sdx
prop_Instance_shrinkMG inst =
- Instance.mem inst >= 2 * Types.unitMem ==>
- case Instance.shrinkByType inst Types.FailMem of
- Types.Ok inst' ->
- Instance.mem inst' == Instance.mem inst - Types.unitMem
- _ -> False
+ Instance.mem inst >= 2 * Types.unitMem ==>
+ case Instance.shrinkByType inst Types.FailMem of
+ Types.Ok inst' -> Instance.mem inst' == Instance.mem inst - Types.unitMem
+ _ -> False
prop_Instance_shrinkMF inst =
- forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
+ forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
let inst' = inst { Instance.mem = mem}
in Types.isBad $ Instance.shrinkByType inst' Types.FailMem
prop_Instance_shrinkCG inst =
- Instance.vcpus inst >= 2 * Types.unitCpu ==>
- case Instance.shrinkByType inst Types.FailCPU of
- Types.Ok inst' ->
- Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
- _ -> False
+ Instance.vcpus inst >= 2 * Types.unitCpu ==>
+ case Instance.shrinkByType inst Types.FailCPU of
+ Types.Ok inst' ->
+ Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
+ _ -> False
prop_Instance_shrinkCF inst =
- forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
+ forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
let inst' = inst { Instance.vcpus = vcpus }
in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU
prop_Instance_shrinkDG inst =
- Instance.dsk inst >= 2 * Types.unitDsk ==>
- case Instance.shrinkByType inst Types.FailDisk of
- Types.Ok inst' ->
- Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
- _ -> False
+ Instance.dsk inst >= 2 * Types.unitDsk ==>
+ case Instance.shrinkByType inst Types.FailDisk of
+ Types.Ok inst' ->
+ Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
+ _ -> False
prop_Instance_shrinkDF inst =
- forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
+ forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
let inst' = inst { Instance.dsk = dsk }
in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk
prop_Instance_setMovable inst m =
- Instance.movable inst' ==? m
+ Instance.movable inst' ==? m
where inst' = Instance.setMovable inst m
testSuite "Instance"
- [ 'prop_Instance_creat
- , 'prop_Instance_setIdx
- , 'prop_Instance_setName
- , 'prop_Instance_setAlias
- , 'prop_Instance_setPri
- , 'prop_Instance_setSec
- , 'prop_Instance_setBoth
- , 'prop_Instance_shrinkMG
- , 'prop_Instance_shrinkMF
- , 'prop_Instance_shrinkCG
- , 'prop_Instance_shrinkCF
- , 'prop_Instance_shrinkDG
- , 'prop_Instance_shrinkDF
- , 'prop_Instance_setMovable
- ]
+ [ 'prop_Instance_creat
+ , 'prop_Instance_setIdx
+ , 'prop_Instance_setName
+ , 'prop_Instance_setAlias
+ , 'prop_Instance_setPri
+ , 'prop_Instance_setSec
+ , 'prop_Instance_setBoth
+ , 'prop_Instance_shrinkMG
+ , 'prop_Instance_shrinkMF
+ , 'prop_Instance_shrinkCG
+ , 'prop_Instance_shrinkCF
+ , 'prop_Instance_shrinkDG
+ , 'prop_Instance_shrinkDF
+ , 'prop_Instance_setMovable
+ ]
-- ** Text backend tests
prop_Text_Load_Instance name mem dsk vcpus status
(NonEmpty pnode) snode
(NonNegative pdx) (NonNegative sdx) autobal dt =
- pnode /= snode && pdx /= sdx ==>
- let vcpus_s = show vcpus
- dsk_s = show dsk
- mem_s = show mem
- status_s = Types.instanceStatusToRaw status
- ndx = if null snode
+ pnode /= snode && pdx /= sdx ==>
+ let vcpus_s = show vcpus
+ dsk_s = show dsk
+ mem_s = show mem
+ status_s = Types.instanceStatusToRaw status
+ ndx = if null snode
then [(pnode, pdx)]
else [(pnode, pdx), (snode, sdx)]
- nl = Data.Map.fromList ndx
- tags = ""
- sbal = if autobal then "Y" else "N"
- sdt = Types.diskTemplateToRaw dt
- inst = Text.loadInst nl
- [name, mem_s, dsk_s, vcpus_s, status_s,
- sbal, pnode, snode, sdt, tags]
- fail1 = Text.loadInst nl
- [name, mem_s, dsk_s, vcpus_s, status_s,
- sbal, pnode, pnode, tags]
- _types = ( name::String, mem::Int, dsk::Int
- , vcpus::Int, status::Types.InstanceStatus
- , snode::String
- , autobal::Bool)
- in
- case inst of
- Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
- False
- Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
- \ loading the instance" $
- Instance.name i == name &&
- Instance.vcpus i == vcpus &&
- Instance.mem i == mem &&
- Instance.pNode i == pdx &&
- Instance.sNode i == (if null snode
- then Node.noSecondary
- else sdx) &&
- Instance.autoBalance i == autobal &&
- Types.isBad fail1
+ nl = Data.Map.fromList ndx
+ tags = ""
+ sbal = if autobal then "Y" else "N"
+ sdt = Types.diskTemplateToRaw dt
+ inst = Text.loadInst nl
+ [name, mem_s, dsk_s, vcpus_s, status_s,
+ sbal, pnode, snode, sdt, tags]
+ fail1 = Text.loadInst nl
+ [name, mem_s, dsk_s, vcpus_s, status_s,
+ sbal, pnode, pnode, tags]
+ _types = ( name::String, mem::Int, dsk::Int
+ , vcpus::Int, status::Types.InstanceStatus
+ , snode::String
+ , autobal::Bool)
+ in case inst of
+ Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
+ False
+ Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
+ \ loading the instance" $
+ Instance.name i == name &&
+ Instance.vcpus i == vcpus &&
+ Instance.mem i == mem &&
+ Instance.pNode i == pdx &&
+ Instance.sNode i == (if null snode
+ then Node.noSecondary
+ else sdx) &&
+ Instance.autoBalance i == autobal &&
+ Types.isBad fail1
prop_Text_Load_InstanceFail ktn fields =
- length fields /= 10 ==>
+ length fields /= 10 ==>
case Text.loadInst nl fields of
Types.Ok _ -> printTestCase "Managed to load instance from invalid\
\ data" False
where nl = Data.Map.fromList ktn
prop_Text_Load_Node name tm nm fm td fd tc fo =
- let conv v = if v < 0
- then "?"
- else show v
- tm_s = conv tm
- nm_s = conv nm
- fm_s = conv fm
- td_s = conv td
- fd_s = conv fd
- tc_s = conv tc
- fo_s = if fo
+ let conv v = if v < 0
+ then "?"
+ else show v
+ tm_s = conv tm
+ nm_s = conv nm
+ fm_s = conv fm
+ td_s = conv td
+ fd_s = conv fd
+ tc_s = conv tc
+ fo_s = if fo
then "Y"
else "N"
- any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
- gid = Group.uuid defGroup
- in case Text.loadNode defGroupAssoc
- [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
- Nothing -> False
- Just (name', node) ->
- if fo || any_broken
- then Node.offline node
- else Node.name node == name' && name' == name &&
- Node.alias node == name &&
- Node.tMem node == fromIntegral tm &&
- Node.nMem node == nm &&
- Node.fMem node == fm &&
- Node.tDsk node == fromIntegral td &&
- Node.fDsk node == fd &&
- Node.tCpu node == fromIntegral tc
+ any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
+ gid = Group.uuid defGroup
+ in case Text.loadNode defGroupAssoc
+ [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
+ Nothing -> False
+ Just (name', node) ->
+ if fo || any_broken
+ then Node.offline node
+ else Node.name node == name' && name' == name &&
+ Node.alias node == name &&
+ Node.tMem node == fromIntegral tm &&
+ Node.nMem node == nm &&
+ Node.fMem node == fm &&
+ Node.tDsk node == fromIntegral td &&
+ Node.fDsk node == fd &&
+ Node.tCpu node == fromIntegral tc
prop_Text_Load_NodeFail fields =
- length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
+ length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
prop_Text_NodeLSIdempotent node =
- (Text.loadNode defGroupAssoc.
- Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
- Just (Node.name n, n)
+ (Text.loadNode defGroupAssoc.
+ Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
+ Just (Node.name n, n)
-- override failN1 to what loadNode returns by default
where n = node { Node.failN1 = True, Node.offline = False }
testSuite "Text"
- [ 'prop_Text_Load_Instance
- , 'prop_Text_Load_InstanceFail
- , 'prop_Text_Load_Node
- , 'prop_Text_Load_NodeFail
- , 'prop_Text_NodeLSIdempotent
- ]
+ [ 'prop_Text_Load_Instance
+ , 'prop_Text_Load_InstanceFail
+ , 'prop_Text_Load_Node
+ , 'prop_Text_Load_NodeFail
+ , 'prop_Text_NodeLSIdempotent
+ ]
-- ** Node tests
prop_Node_setAlias node name =
- Node.name newnode == Node.name node &&
- Node.alias newnode == name
+ Node.name newnode == Node.name node &&
+ Node.alias newnode == name
where _types = (node::Node.Node, name::String)
newnode = Node.setAlias node name
prop_Node_setOffline node status =
- Node.offline newnode ==? status
+ Node.offline newnode ==? status
where newnode = Node.setOffline node status
prop_Node_setXmem node xm =
- Node.xMem newnode ==? xm
+ Node.xMem newnode ==? xm
where newnode = Node.setXmem node xm
prop_Node_setMcpu node mc =
- Node.mCpu newnode ==? mc
+ Node.mCpu newnode ==? mc
where newnode = Node.setMcpu node mc
-- | Check that an instance add with too high memory or disk will be
-- rejected.
-prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
- not (Node.failN1 node) &&
- not (Instance.instanceOffline inst)
- ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailMem -> True
- _ -> False
- where _types = (node::Node.Node, inst::Instance.Instance)
- inst' = setInstanceSmallerThanNode node inst
- inst'' = inst' { Instance.mem = Instance.mem inst }
-
-prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
- not (Node.failN1 node)
- ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailDisk -> True
- _ -> False
+prop_Node_addPriFM node inst =
+ Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
+ not (Instance.instanceOffline inst) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailMem -> True
+ _ -> False
+ where _types = (node::Node.Node, inst::Instance.Instance)
+ inst' = setInstanceSmallerThanNode node inst
+ inst'' = inst' { Instance.mem = Instance.mem inst }
+
+prop_Node_addPriFD node inst =
+ Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailDisk -> True
+ _ -> False
where _types = (node::Node.Node, inst::Instance.Instance)
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.dsk = Instance.dsk inst }
prop_Node_addPriFC node inst (Positive extra) =
- not (Node.failN1 node) &&
- not (Instance.instanceOffline inst) ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailCPU -> True
- _ -> False
+ not (Node.failN1 node) && not (Instance.instanceOffline inst) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailCPU -> True
+ _ -> False
where _types = (node::Node.Node, inst::Instance.Instance)
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addSec node inst pdx =
- ((Instance.mem inst >= (Node.fMem node - Node.rMem node) &&
- not (Instance.instanceOffline inst)) ||
- Instance.dsk inst >= Node.fDsk node) &&
- not (Node.failN1 node)
- ==> isFailure (Node.addSec node inst pdx)
+ ((Instance.mem inst >= (Node.fMem node - Node.rMem node) &&
+ not (Instance.instanceOffline inst)) ||
+ Instance.dsk inst >= Node.fDsk node) &&
+ not (Node.failN1 node) ==>
+ isFailure (Node.addSec node inst pdx)
where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
-- | Check that an offline instance with reasonable disk size can always
-- be added.
prop_Node_addPriOffline node =
- forAll (arbitrary `suchThat`
- (\ x -> (Instance.dsk x < Node.fDsk node) &&
- Instance.instanceOffline x)) $ \inst ->
- case Node.addPri node inst of
- Types.OpGood _ -> True
- _ -> False
+ forAll (arbitrary `suchThat`
+ (\ x -> (Instance.dsk x < Node.fDsk node) &&
+ Instance.instanceOffline x)) $ \inst ->
+ case Node.addPri node inst of
+ Types.OpGood _ -> True
+ _ -> False
prop_Node_addSecOffline node pdx =
- forAll (arbitrary `suchThat`
- (\ x -> (Instance.dsk x < Node.fDsk node) &&
- Instance.instanceOffline x)) $ \inst ->
- case Node.addSec node inst pdx of
- Types.OpGood _ -> True
- _ -> False
+ forAll (arbitrary `suchThat`
+ (\ x -> (Instance.dsk x < Node.fDsk node) &&
+ Instance.instanceOffline x)) $ \inst ->
+ case Node.addSec node inst pdx of
+ Types.OpGood _ -> True
+ _ -> False
-- | Checks for memory reservation changes.
prop_Node_rMem inst =
- not (Instance.instanceOffline inst) ==>
- forAll (arbitrary `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
- -- ab = auto_balance, nb = non-auto_balance
- -- we use -1 as the primary node of the instance
- let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
- inst_ab = setInstanceSmallerThanNode node inst'
- inst_nb = inst_ab { Instance.autoBalance = False }
- -- now we have the two instances, identical except the
- -- autoBalance attribute
- orig_rmem = Node.rMem node
- inst_idx = Instance.idx inst_ab
- node_add_ab = Node.addSec node inst_ab (-1)
- node_add_nb = Node.addSec node inst_nb (-1)
- node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
- node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
- in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
- (Types.OpGood a_ab, Types.OpGood a_nb,
- Types.OpGood d_ab, Types.OpGood d_nb) ->
- printTestCase "Consistency checks failed" $
- Node.rMem a_ab > orig_rmem &&
- Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
- Node.rMem a_nb == orig_rmem &&
- Node.rMem d_ab == orig_rmem &&
- Node.rMem d_nb == orig_rmem &&
- -- this is not related to rMem, but as good a place to
- -- test as any
- inst_idx `elem` Node.sList a_ab &&
- not (inst_idx `elem` Node.sList d_ab)
- x -> printTestCase ("Failed to add/remove instances: " ++ show x)
- False
+ not (Instance.instanceOffline inst) ==>
+ forAll (arbitrary `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
+ -- ab = auto_balance, nb = non-auto_balance
+ -- we use -1 as the primary node of the instance
+ let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
+ inst_ab = setInstanceSmallerThanNode node inst'
+ inst_nb = inst_ab { Instance.autoBalance = False }
+ -- now we have the two instances, identical except the
+ -- autoBalance attribute
+ orig_rmem = Node.rMem node
+ inst_idx = Instance.idx inst_ab
+ node_add_ab = Node.addSec node inst_ab (-1)
+ node_add_nb = Node.addSec node inst_nb (-1)
+ node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
+ node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
+ in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
+ (Types.OpGood a_ab, Types.OpGood a_nb,
+ Types.OpGood d_ab, Types.OpGood d_nb) ->
+ printTestCase "Consistency checks failed" $
+ Node.rMem a_ab > orig_rmem &&
+ Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
+ Node.rMem a_nb == orig_rmem &&
+ Node.rMem d_ab == orig_rmem &&
+ Node.rMem d_nb == orig_rmem &&
+ -- this is not related to rMem, but as good a place to
+ -- test as any
+ inst_idx `elem` Node.sList a_ab &&
+ not (inst_idx `elem` Node.sList d_ab)
+ x -> printTestCase ("Failed to add/remove instances: " ++ show x) False
-- | Check mdsk setting.
prop_Node_setMdsk node mx =
- Node.loDsk node' >= 0 &&
- fromIntegral (Node.loDsk node') <= Node.tDsk node &&
- Node.availDisk node' >= 0 &&
- Node.availDisk node' <= Node.fDsk node' &&
- fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
- Node.mDsk node' == mx'
+ Node.loDsk node' >= 0 &&
+ fromIntegral (Node.loDsk node') <= Node.tDsk node &&
+ Node.availDisk node' >= 0 &&
+ Node.availDisk node' <= Node.fDsk node' &&
+ fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
+ Node.mDsk node' == mx'
where _types = (node::Node.Node, mx::SmallRatio)
node' = Node.setMdsk node mx'
SmallRatio mx' = mx
-- Check tag maps
prop_Node_tagMaps_idempotent tags =
- Node.delTags (Node.addTags m tags) tags ==? m
+ Node.delTags (Node.addTags m tags) tags ==? m
where m = Data.Map.empty
prop_Node_tagMaps_reject tags =
- not (null tags) ==>
- all (\t -> Node.rejectAddTags m [t]) tags
+ not (null tags) ==>
+ all (\t -> Node.rejectAddTags m [t]) tags
where m = Node.addTags Data.Map.empty tags
prop_Node_showField node =
(null nodes || not (null ng))
testSuite "Node"
- [ 'prop_Node_setAlias
- , 'prop_Node_setOffline
- , 'prop_Node_setMcpu
- , 'prop_Node_setXmem
- , 'prop_Node_addPriFM
- , 'prop_Node_addPriFD
- , 'prop_Node_addPriFC
- , 'prop_Node_addSec
- , 'prop_Node_addPriOffline
- , 'prop_Node_addSecOffline
- , 'prop_Node_rMem
- , 'prop_Node_setMdsk
- , 'prop_Node_tagMaps_idempotent
- , 'prop_Node_tagMaps_reject
- , 'prop_Node_showField
- , 'prop_Node_computeGroups
- ]
+ [ 'prop_Node_setAlias
+ , 'prop_Node_setOffline
+ , 'prop_Node_setMcpu
+ , 'prop_Node_setXmem
+ , 'prop_Node_addPriFM
+ , 'prop_Node_addPriFD
+ , 'prop_Node_addPriFC
+ , 'prop_Node_addSec
+ , 'prop_Node_addPriOffline
+ , 'prop_Node_addSecOffline
+ , 'prop_Node_rMem
+ , 'prop_Node_setMdsk
+ , 'prop_Node_tagMaps_idempotent
+ , 'prop_Node_tagMaps_reject
+ , 'prop_Node_showField
+ , 'prop_Node_computeGroups
+ ]
-- ** Cluster tests
-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
- forAll (choose (1, 1024)) $ \count ->
+ forAll (choose (1, 1024)) $ \count ->
(not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
(Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
- let fn = Node.buildPeers node Container.empty
- nlst = replicate count fn
- score = Cluster.compCVNodes nlst
- -- we can't say == 0 here as the floating point errors accumulate;
- -- this should be much lower than the default score in CLI.hs
- in score <= 1e-12
+ let fn = Node.buildPeers node Container.empty
+ nlst = replicate count fn
+ score = Cluster.compCVNodes nlst
+ -- we can't say == 0 here as the floating point errors accumulate;
+ -- this should be much lower than the default score in CLI.hs
+ in score <= 1e-12
-- | Check that cluster stats are sane.
prop_CStats_sane node =
- forAll (choose (1, 1024)) $ \count ->
+ forAll (choose (1, 1024)) $ \count ->
(not (Node.offline node) && not (Node.failN1 node) &&
(Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
- let fn = Node.buildPeers node Container.empty
- nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
- nl = Container.fromList nlst
- cstats = Cluster.totalResources nl
- in Cluster.csAdsk cstats >= 0 &&
- Cluster.csAdsk cstats <= Cluster.csFdsk cstats
+ let fn = Node.buildPeers node Container.empty
+ nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
+ nl = Container.fromList nlst
+ cstats = Cluster.totalResources nl
+ in Cluster.csAdsk cstats >= 0 &&
+ Cluster.csAdsk cstats <= Cluster.csFdsk cstats
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
- forAll (choose (5, 20)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && Node.availDisk node > 0
- && Node.availMem node > 0
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
- Cluster.tryAlloc nl il inst' of
- Types.Bad _ -> False
- Types.Ok as ->
- case Cluster.asSolution as of
- Nothing -> False
- Just (xnl, xi, _, cv) ->
- let il' = Container.add (Instance.idx xi) xi il
- tbl = Cluster.Table xnl il' cv []
- in not (canBalance tbl True True False)
+ forAll (choose (5, 20)) $ \count ->
+ not (Node.offline node)
+ && not (Node.failN1 node)
+ && Node.availDisk node > 0
+ && Node.availMem node > 0
+ ==>
+ let nl = makeSmallCluster node count
+ il = Container.empty
+ inst' = setInstanceSmallerThanNode node inst
+ in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ Cluster.tryAlloc nl il inst' of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just (xnl, xi, _, cv) ->
+ let il' = Container.add (Instance.idx xi) xi il
+ tbl = Cluster.Table xnl il' cv []
+ in not (canBalance tbl True True False)
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
- forAll (choose (2, 5)) $ \count ->
- forAll (choose (1, 2)) $ \rqnodes ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
- in case allocnodes >>= \allocnodes' ->
- Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
- Types.Bad _ -> False
- Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
- IntMap.size il' == length ixes &&
- length ixes == length cstats
+ forAll (choose (2, 5)) $ \count ->
+ forAll (choose (1, 2)) $ \rqnodes ->
+ not (Node.offline node)
+ && not (Node.failN1 node)
+ && isNodeBig node 4
+ ==>
+ let nl = makeSmallCluster node count
+ il = Container.empty
+ allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
+ in case allocnodes >>= \allocnodes' ->
+ Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
+ Types.Bad _ -> False
+ Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
+ IntMap.size il' == length ixes &&
+ length ixes == length cstats
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
- forAll (choose (4, 8)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
- Cluster.tryAlloc nl il inst' of
- Types.Bad _ -> False
- Types.Ok as ->
- case Cluster.asSolution as of
- Nothing -> False
- Just (xnl, xi, _, _) ->
- let sdx = Instance.sNode xi
- il' = Container.add (Instance.idx xi) xi il
- in case IAlloc.processRelocate defGroupList xnl il'
- (Instance.idx xi) 1 [sdx] of
- Types.Ok _ -> True
- _ -> False
+ forAll (choose (4, 8)) $ \count ->
+ not (Node.offline node)
+ && not (Node.failN1 node)
+ && isNodeBig node 4
+ ==>
+ let nl = makeSmallCluster node count
+ il = Container.empty
+ inst' = setInstanceSmallerThanNode node inst
+ in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ Cluster.tryAlloc nl il inst' of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just (xnl, xi, _, _) ->
+ let sdx = Instance.sNode xi
+ il' = Container.add (Instance.idx xi) xi il
+ in case IAlloc.processRelocate defGroupList xnl il'
+ (Instance.idx xi) 1 [sdx] of
+ Types.Ok _ -> True
+ _ -> False
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
- forAll (genNode (Just 5) (Just 128)) $ \node ->
- forAll (choose (3, 5)) $ \count ->
- not (Node.offline node) && not (Node.failN1 node) ==>
- let nl = makeSmallCluster node count
- (hnode, nl') = IntMap.deleteFindMax nl
- il = Container.empty
- allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
- i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
- in case allocnodes >>= \allocnodes' ->
- Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
- Types.Bad _ -> False
- Types.Ok (_, xnl, il', _, _) ->
- let ynl = Container.add (Node.idx hnode) hnode xnl
- cv = Cluster.compCV ynl
- tbl = Cluster.Table ynl il' cv []
- in canBalance tbl True True False
+ forAll (genNode (Just 5) (Just 128)) $ \node ->
+ forAll (choose (3, 5)) $ \count ->
+ not (Node.offline node) && not (Node.failN1 node) ==>
+ let nl = makeSmallCluster node count
+ (hnode, nl') = IntMap.deleteFindMax nl
+ il = Container.empty
+ allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
+ i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
+ in case allocnodes >>= \allocnodes' ->
+ Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
+ Types.Bad _ -> False
+ Types.Ok (_, xnl, il', _, _) ->
+ let ynl = Container.add (Node.idx hnode) hnode xnl
+ cv = Cluster.compCV ynl
+ tbl = Cluster.Table ynl il' cv []
+ in canBalance tbl True True False
-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
(Container.elems nl'')) gni
testSuite "Cluster"
- [ 'prop_Score_Zero
- , 'prop_CStats_sane
- , 'prop_ClusterAlloc_sane
- , 'prop_ClusterCanTieredAlloc
- , 'prop_ClusterAllocEvac
- , 'prop_ClusterAllocBalance
- , 'prop_ClusterCheckConsistency
- , 'prop_ClusterSplitCluster
- ]
+ [ 'prop_Score_Zero
+ , 'prop_CStats_sane
+ , 'prop_ClusterAlloc_sane
+ , 'prop_ClusterCanTieredAlloc
+ , 'prop_ClusterAllocEvac
+ , 'prop_ClusterAllocBalance
+ , 'prop_ClusterCheckConsistency
+ , 'prop_ClusterSplitCluster
+ ]
-- ** OpCodes tests
where _types = op::OpCodes.OpCode
testSuite "OpCodes"
- [ 'prop_OpCodes_serialization ]
+ [ 'prop_OpCodes_serialization ]
-- ** Jobs tests
where _types = js::Jobs.JobStatus
testSuite "Jobs"
- [ 'prop_OpStatus_serialization
- , 'prop_JobStatus_serialization
- ]
+ [ 'prop_OpStatus_serialization
+ , 'prop_JobStatus_serialization
+ ]
-- ** Loader tests
prop_Loader_lookupNode ktn inst node =
Loader.lookupNode nl inst node ==? Data.Map.lookup node nl
- where nl = Data.Map.fromList ktn
+ where nl = Data.Map.fromList ktn
prop_Loader_lookupInstance kti inst =
Loader.lookupInstance il inst ==? Data.Map.lookup inst il
- where il = Data.Map.fromList kti
+ where il = Data.Map.fromList kti
prop_Loader_assignIndices nodes =
Data.Map.size nassoc == length nodes &&
(if not (null nodes)
then maximum (IntMap.keys kt) == length nodes - 1
else True)
- where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
+ where (nassoc, kt) =
+ Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
Loader.LookupResult Loader.PartialMatch s1
testSuite "Loader"
- [ 'prop_Loader_lookupNode
- , 'prop_Loader_lookupInstance
- , 'prop_Loader_assignIndices
- , 'prop_Loader_mergeData
- , 'prop_Loader_compareNameComponent_equal
- , 'prop_Loader_compareNameComponent_prefix
- ]
+ [ 'prop_Loader_lookupNode
+ , 'prop_Loader_lookupInstance
+ , 'prop_Loader_assignIndices
+ , 'prop_Loader_mergeData
+ , 'prop_Loader_compareNameComponent_equal
+ , 'prop_Loader_compareNameComponent_prefix
+ ]
-- ** Types tests
prop_Types_AllocPolicy_serialisation apol =
- case J.readJSON (J.showJSON apol) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == apol
- J.Error s -> printTestCase ("failed to deserialise: " ++ s) False
- where _types = apol::Types.AllocPolicy
+ case J.readJSON (J.showJSON apol) of
+ J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
+ p == apol
+ J.Error s -> printTestCase ("failed to deserialise: " ++ s) False
+ where _types = apol::Types.AllocPolicy
prop_Types_DiskTemplate_serialisation dt =
- case J.readJSON (J.showJSON dt) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == dt
- J.Error s -> printTestCase ("failed to deserialise: " ++ s)
- False
- where _types = dt::Types.DiskTemplate
+ case J.readJSON (J.showJSON dt) of
+ J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
+ p == dt
+ J.Error s -> printTestCase ("failed to deserialise: " ++ s)
+ False
+ where _types = dt::Types.DiskTemplate
prop_Types_opToResult op =
- case op of
- Types.OpFail _ -> Types.isBad r
- Types.OpGood v -> case r of
- Types.Bad _ -> False
- Types.Ok v' -> v == v'
- where r = Types.opToResult op
- _types = op::Types.OpResult Int
+ case op of
+ Types.OpFail _ -> Types.isBad r
+ Types.OpGood v -> case r of
+ Types.Bad _ -> False
+ Types.Ok v' -> v == v'
+ where r = Types.opToResult op
+ _types = op::Types.OpResult Int
prop_Types_eitherToResult ei =
- case ei of
- Left _ -> Types.isBad r
- Right v -> case r of
- Types.Bad _ -> False
- Types.Ok v' -> v == v'
+ case ei of
+ Left _ -> Types.isBad r
+ Right v -> case r of
+ Types.Bad _ -> False
+ Types.Ok v' -> v == v'
where r = Types.eitherToResult ei
_types = ei::Either String Int
testSuite "Types"
- [ 'prop_Types_AllocPolicy_serialisation
- , 'prop_Types_DiskTemplate_serialisation
- , 'prop_Types_opToResult
- , 'prop_Types_eitherToResult
- ]
+ [ 'prop_Types_AllocPolicy_serialisation
+ , 'prop_Types_DiskTemplate_serialisation
+ , 'prop_Types_opToResult
+ , 'prop_Types_eitherToResult
+ ]