+{-# LANGUAGE TemplateHaskell #-}
+
{-| Unittests for ganeti-htools.
-}
{-
-Copyright (C) 2009, 2010, 2011 Google Inc.
+Copyright (C) 2009, 2010, 2011, 2012 Google Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
-}
module Ganeti.HTools.QC
- ( testUtils
- , testPeerMap
- , testContainer
- , testInstance
- , testNode
- , testText
- , testOpCodes
- , testJobs
- , testCluster
- , testLoader
- , testTypes
- ) where
+ ( testUtils
+ , testPeerMap
+ , testContainer
+ , testInstance
+ , testNode
+ , testText
+ , testOpCodes
+ , testJobs
+ , testCluster
+ , testLoader
+ , testTypes
+ ) where
import Test.QuickCheck
import Data.List (findIndex, intercalate, nub, isPrefixOf)
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
+import qualified Ganeti.HTools.JSON as JSON
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Version
import qualified Ganeti.Constants as C
-run :: Testable prop => prop -> Args -> IO Result
-run = flip quickCheckWithResult
+import qualified Ganeti.HTools.Program.Hail
+import qualified Ganeti.HTools.Program.Hbal
+import qualified Ganeti.HTools.Program.Hscan
+import qualified Ganeti.HTools.Program.Hspace
+
+import Ganeti.HTools.QCHelper (testSuite)
-- * Constants
maxCpu :: Int
maxCpu = 1024
+-- | Null iPolicy, and by null we mean very liberal.
+nullIPolicy = Types.IPolicy
+ { Types.iPolicyMinSpec = Types.ISpec { Types.iSpecMemorySize = 0
+ , Types.iSpecCpuCount = 0
+ , Types.iSpecDiskSize = 0
+ , Types.iSpecDiskCount = 0
+ , Types.iSpecNicCount = 0
+ }
+ , Types.iPolicyMaxSpec = Types.ISpec { Types.iSpecMemorySize = maxBound
+ , Types.iSpecCpuCount = maxBound
+ , Types.iSpecDiskSize = maxBound
+ , Types.iSpecDiskCount = C.maxDisks
+ , Types.iSpecNicCount = C.maxNics
+ }
+ , Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
+ , Types.iSpecCpuCount = Types.unitCpu
+ , Types.iSpecDiskSize = Types.unitDsk
+ , Types.iSpecDiskCount = 1
+ , Types.iSpecNicCount = 1
+ }
+ , Types.iPolicyDiskTemplates = [Types.DTDrbd8, Types.DTPlain]
+ }
+
+
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
- Group.create "default" Utils.defaultGroupID
- Types.AllocPreferred
+ Group.create "default" Types.defaultGroupID Types.AllocPreferred
+ nullIPolicy
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]
isFailure (Types.OpFail _) = True
isFailure _ = False
+-- | Checks for equality with proper annotation.
+(==?) :: (Show a, Eq a) => a -> a -> Property
+(==?) x y = printTestCase
+ ("Expected equality, but '" ++
+ show x ++ "' /= '" ++ show y ++ "'") (x == y)
+infix 3 ==?
+
+-- | Show a message and fail the test.
+failTest :: String -> Property
+failTest msg = printTestCase msg False
+
-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode node inst =
- inst { Instance.mem = Node.availMem node `div` 2
- , Instance.dsk = Node.availDisk node `div` 2
- , Instance.vcpus = Node.availCpu node `div` 2
- }
+ inst { Instance.mem = Node.availMem node `div` 2
+ , Instance.dsk = Node.availDisk node `div` 2
+ , Instance.vcpus = Node.availCpu node `div` 2
+ }
+
+-- | Check if an instance is smaller than a node.
+isInstanceSmallerThanNode node inst =
+ Instance.mem inst <= Node.availMem node `div` 2 &&
+ Instance.dsk inst <= Node.availDisk node `div` 2 &&
+ Instance.vcpus inst <= Node.availCpu node `div` 2
-- | Create an instance given its spec.
createInstance mem dsk vcpus =
- Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
- Types.DTDrbd8
+ Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
+ Types.DTDrbd8
-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
- let fn = Node.buildPeers node Container.empty
- namelst = map (\n -> (Node.name n, n)) (replicate count fn)
- (_, nlst) = Loader.assignIndices namelst
- in nlst
+ let origname = Node.name node
+ origalias = Node.alias node
+ nodes = map (\idx -> node { Node.name = origname ++ "-" ++ show idx
+ , Node.alias = origalias ++ "-" ++ show idx })
+ [1..count]
+ fn = flip Node.buildPeers Container.empty
+ namelst = map (\n -> (Node.name n, fn n)) nodes
+ (_, nlst) = Loader.assignIndices namelst
+ in nlst
+
+-- | Make a small cluster, both nodes and instances.
+makeSmallEmptyCluster :: Node.Node -> Int -> Instance.Instance
+ -> (Node.List, Instance.List, Instance.Instance)
+makeSmallEmptyCluster node count inst =
+ (makeSmallCluster node count, Container.empty,
+ setInstanceSmallerThanNode node inst)
-- | Checks if a node is "big" enough.
-isNodeBig :: Node.Node -> Int -> Bool
-isNodeBig node size = Node.availDisk node > size * Types.unitDsk
+isNodeBig :: Int -> Node.Node -> Bool
+isNodeBig size node = Node.availDisk node > size * Types.unitDsk
&& Node.availMem node > size * Types.unitMem
&& Node.availCpu node > size * Types.unitCpu
let pnode = Container.find pdx nl
snode = Container.find sdx nl
maxiidx = if Container.null il
- then 0
- else fst (Container.findMax il) + 1
+ then 0
+ else fst (Container.findMax il) + 1
inst' = inst { Instance.idx = maxiidx,
Instance.pNode = pdx, Instance.sNode = sdx }
pnode' = Node.setPri pnode inst'
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
- arbitrary = do
- x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
- return (DNSChar x)
+ arbitrary = do
+ x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
+ return (DNSChar x)
getName :: Gen String
getName = do
dn <- vector n::Gen [DNSChar]
return (map dnsGetChar dn)
-
getFQDN :: Gen String
getFQDN = do
felem <- getName
let frest' = map (map dnsGetChar) frest
return (felem ++ "." ++ intercalate "." frest')
+-- | Defines a tag type.
+newtype TagChar = TagChar { tagGetChar :: Char }
+
+-- | All valid tag chars. This doesn't need to match _exactly_
+-- Ganeti's own tag regex, just enough for it to be close.
+tagChar :: [Char]
+tagChar = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] ++ ".+*/:@-"
+
+instance Arbitrary TagChar where
+ arbitrary = do
+ c <- elements tagChar
+ return (TagChar c)
+
+-- | Generates a tag.
+genTag :: Gen [TagChar]
+genTag = do
+ -- the correct value would be C.maxTagLen, but that's way too
+ -- verbose in unittests, and at the moment I don't see any possible
+ -- bugs with longer tags and the way we use tags in htools
+ n <- choose (1, 10)
+ vector n
+
+-- | Generates a list of tags (correctly upper bounded).
+genTags :: Gen [String]
+genTags = do
+ -- the correct value would be C.maxTagsPerObj, but per the comment
+ -- in genTag, we don't use tags enough in htools to warrant testing
+ -- such big values
+ n <- choose (0, 10::Int)
+ tags <- mapM (const genTag) [1..n]
+ return $ map (map tagGetChar) tags
+
+instance Arbitrary Types.InstanceStatus where
+ arbitrary = elements [minBound..maxBound]
+
-- let's generate a random instance
instance Arbitrary Instance.Instance where
- arbitrary = do
- name <- getFQDN
- mem <- choose (0, maxMem)
- dsk <- choose (0, maxDsk)
- run_st <- elements [ C.inststErrorup
- , C.inststErrordown
- , C.inststAdmindown
- , C.inststNodedown
- , C.inststNodeoffline
- , C.inststRunning
- , "no_such_status1"
- , "no_such_status2"]
- pn <- arbitrary
- sn <- arbitrary
- vcpus <- choose (0, maxCpu)
- return $ Instance.create name mem dsk vcpus run_st [] True pn sn
- Types.DTDrbd8
+ arbitrary = do
+ name <- getFQDN
+ mem <- choose (0, maxMem)
+ dsk <- choose (0, maxDsk)
+ run_st <- arbitrary
+ pn <- arbitrary
+ sn <- arbitrary
+ vcpus <- choose (0, maxCpu)
+ return $ Instance.create name mem dsk vcpus run_st [] True pn sn
+ Types.DTDrbd8
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
-> Gen Node.Node
genNode min_multiplier max_multiplier = do
let (base_mem, base_dsk, base_cpu) =
- case min_multiplier of
- Just mm -> (mm * Types.unitMem,
- mm * Types.unitDsk,
- mm * Types.unitCpu)
- Nothing -> (0, 0, 0)
+ case min_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (0, 0, 0)
(top_mem, top_dsk, top_cpu) =
- case max_multiplier of
- Just mm -> (mm * Types.unitMem,
- mm * Types.unitDsk,
- mm * Types.unitCpu)
- Nothing -> (maxMem, maxDsk, maxCpu)
+ case max_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (maxMem, maxDsk, maxCpu)
name <- getFQDN
mem_t <- choose (base_mem, top_mem)
mem_f <- choose (base_mem, mem_t)
offl <- arbitrary
let n = Node.create name (fromIntegral mem_t) mem_n mem_f
(fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
- return $ Node.buildPeers n Container.empty
+ n' = Node.setPolicy nullIPolicy n
+ return $ Node.buildPeers n' Container.empty
+
+-- | Helper function to generate a sane node.
+genOnlineNode :: Gen Node.Node
+genOnlineNode = do
+ arbitrary `suchThat` (\n -> not (Node.offline n) &&
+ not (Node.failN1 n) &&
+ Node.availDisk n > 0 &&
+ Node.availMem n > 0 &&
+ Node.availCpu n > 0)
-- and a random node
instance Arbitrary Node.Node where
- arbitrary = genNode Nothing Nothing
+ arbitrary = genNode Nothing Nothing
-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
- arbitrary = elements [ OpCodes.ReplaceOnPrimary
- , OpCodes.ReplaceOnSecondary
- , OpCodes.ReplaceNewSecondary
- , OpCodes.ReplaceAuto
- ]
+ arbitrary = elements [minBound..maxBound]
instance Arbitrary OpCodes.OpCode where
arbitrary = do
, "OP_INSTANCE_FAILOVER"
, "OP_INSTANCE_MIGRATE"
]
- (case op_id of
- "OP_TEST_DELAY" ->
- liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
- "OP_INSTANCE_REPLACE_DISKS" ->
- liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
+ case op_id of
+ "OP_TEST_DELAY" ->
+ liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
+ "OP_INSTANCE_REPLACE_DISKS" ->
+ liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
arbitrary arbitrary arbitrary
- "OP_INSTANCE_FAILOVER" ->
- liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
- "OP_INSTANCE_MIGRATE" ->
- liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
+ "OP_INSTANCE_FAILOVER" ->
+ liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
arbitrary
- _ -> fail "Wrong opcode")
+ "OP_INSTANCE_MIGRATE" ->
+ liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
+ arbitrary arbitrary arbitrary
+ _ -> fail "Wrong opcode"
instance Arbitrary Jobs.OpStatus where
arbitrary = elements [minBound..maxBound]
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
- arbitrary = do
- v <- choose (0, 1)
- return $ SmallRatio v
+ arbitrary = do
+ v <- choose (0, 1)
+ return $ SmallRatio v
instance Arbitrary Types.AllocPolicy where
arbitrary = elements [minBound..maxBound]
instance Arbitrary Types.DiskTemplate where
arbitrary = elements [minBound..maxBound]
+instance Arbitrary Types.FailMode where
+ arbitrary = elements [minBound..maxBound]
+
+instance Arbitrary Types.EvacMode where
+ arbitrary = elements [minBound..maxBound]
+
+instance Arbitrary a => Arbitrary (Types.OpResult a) where
+ arbitrary = arbitrary >>= \c ->
+ if c
+ then liftM Types.OpGood arbitrary
+ else liftM Types.OpFail arbitrary
+
+instance Arbitrary Types.ISpec where
+ arbitrary = do
+ mem <- arbitrary::Gen (NonNegative Int)
+ dsk_c <- arbitrary::Gen (NonNegative Int)
+ dsk_s <- arbitrary::Gen (NonNegative Int)
+ cpu <- arbitrary::Gen (NonNegative Int)
+ nic <- arbitrary::Gen (NonNegative Int)
+ return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem
+ , Types.iSpecCpuCount = fromIntegral cpu
+ , Types.iSpecDiskSize = fromIntegral dsk_s
+ , Types.iSpecDiskCount = fromIntegral dsk_c
+ , Types.iSpecNicCount = fromIntegral nic
+ }
+
+-- | Helper function to check whether a spec is less than or equal to another.
+iSpecSmaller :: Types.ISpec -> Types.ISpec -> Bool
+iSpecSmaller imin imax =
+ Types.iSpecMemorySize imin <= Types.iSpecMemorySize imax &&
+ Types.iSpecCpuCount imin <= Types.iSpecCpuCount imax &&
+ Types.iSpecDiskSize imin <= Types.iSpecDiskSize imax &&
+ Types.iSpecDiskCount imin <= Types.iSpecDiskCount imax &&
+ Types.iSpecNicCount imin <= Types.iSpecNicCount imax
+
+instance Arbitrary Types.IPolicy where
+ arbitrary = do
+ imin <- arbitrary
+ istd <- arbitrary `suchThat` (iSpecSmaller imin)
+ imax <- arbitrary `suchThat` (iSpecSmaller istd)
+ dts <- arbitrary
+ return Types.IPolicy { Types.iPolicyMinSpec = imin
+ , Types.iPolicyStdSpec = istd
+ , Types.iPolicyMaxSpec = imax
+ , Types.iPolicyDiskTemplates = dts
+ }
+
-- * Actual tests
-- ** Utils tests
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
- forAll (arbitrary `suchThat`
- (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
- Utils.sepSplit ',' (Utils.commaJoin lst) == lst
+ forAll (arbitrary `suchThat`
+ (\l -> l /= [""] && all (notElem ',') l )) $ \lst ->
+ Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst
-- | Split and join should always be idempotent.
-prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s
+prop_Utils_commaSplitJoin s =
+ Utils.commaJoin (Utils.sepSplit ',' s) ==? s
-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
- -- a missing key will be returned with the default
- Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
- -- a found key will be returned as is, not with default
- Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
- random_key (def_value+1) == Just def_value
- where _types = def_value :: Integer
+ -- a missing key will be returned with the default
+ JSON.fromObjWithDefault [] random_key def_value == Just def_value &&
+ -- a found key will be returned as is, not with default
+ JSON.fromObjWithDefault [(random_key, J.showJSON def_value)]
+ random_key (def_value+1) == Just def_value
+ where _types = def_value :: Integer
+
+-- | Test that functional if' behaves like the syntactic sugar if.
+prop_Utils_if'if :: Bool -> Int -> Int -> Gen Prop
+prop_Utils_if'if cnd a b =
+ Utils.if' cnd a b ==? if cnd then a else b
+
+-- | Test basic select functionality
+prop_Utils_select :: Int -- ^ Default result
+ -> [Int] -- ^ List of False values
+ -> [Int] -- ^ List of True values
+ -> Gen Prop -- ^ Test result
+prop_Utils_select def lst1 lst2 =
+ Utils.select def (flist ++ tlist) ==? expectedresult
+ where expectedresult = Utils.if' (null lst2) def (head lst2)
+ flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
+
+-- | Test basic select functionality with undefined default
+prop_Utils_select_undefd :: [Int] -- ^ List of False values
+ -> NonEmptyList Int -- ^ List of True values
+ -> Gen Prop -- ^ Test result
+prop_Utils_select_undefd lst1 (NonEmpty lst2) =
+ Utils.select undefined (flist ++ tlist) ==? head lst2
+ where flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
+
+-- | Test basic select functionality with undefined list values
+prop_Utils_select_undefv :: [Int] -- ^ List of False values
+ -> NonEmptyList Int -- ^ List of True values
+ -> Gen Prop -- ^ Test result
+prop_Utils_select_undefv lst1 (NonEmpty lst2) =
+ Utils.select undefined cndlist ==? head lst2
+ where flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
+ cndlist = flist ++ tlist ++ [undefined]
+
+prop_Utils_parseUnit (NonNegative n) =
+ Utils.parseUnit (show n) == Types.Ok n &&
+ Utils.parseUnit (show n ++ "m") == Types.Ok n &&
+ (case Utils.parseUnit (show n ++ "M") of
+ Types.Ok m -> if n > 0
+ then m < n -- for positive values, X MB is < than X MiB
+ else m == 0 -- but for 0, 0 MB == 0 MiB
+ Types.Bad _ -> False) &&
+ Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
+ Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
+ Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
+ where _types = n::Int
-- | Test list for the Utils module.
-testUtils =
- [ run prop_Utils_commaJoinSplit
- , run prop_Utils_commaSplitJoin
- , run prop_Utils_fromObjWithDefault
- ]
+testSuite "Utils"
+ [ 'prop_Utils_commaJoinSplit
+ , 'prop_Utils_commaSplitJoin
+ , 'prop_Utils_fromObjWithDefault
+ , 'prop_Utils_if'if
+ , 'prop_Utils_select
+ , 'prop_Utils_select_undefd
+ , 'prop_Utils_select_undefv
+ , 'prop_Utils_parseUnit
+ ]
-- ** PeerMap tests
-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
- fn puniq == fn (fn puniq)
+ fn puniq ==? fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap,
key::PeerMap.Key, em::PeerMap.Elem)
fn = PeerMap.add key em
-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
- fn puniq == fn (fn puniq)
+ fn puniq ==? fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
fn = PeerMap.remove key
puniq = PeerMap.accumArray const pmap
-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
- PeerMap.find key (PeerMap.remove key puniq) == 0
+ PeerMap.find key (PeerMap.remove key puniq) ==? 0
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
puniq = PeerMap.accumArray const pmap
-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
- PeerMap.find key (PeerMap.add key em puniq) == em
+ PeerMap.find key (PeerMap.add key em puniq) ==? em
where _types = (pmap::PeerMap.PeerMap,
key::PeerMap.Key, em::PeerMap.Elem)
puniq = PeerMap.accumArray const pmap
-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
- PeerMap.maxElem puniq == if null puniq then 0
- else (maximum . snd . unzip) puniq
+ PeerMap.maxElem puniq ==? if null puniq then 0
+ else (maximum . snd . unzip) puniq
where _types = pmap::PeerMap.PeerMap
puniq = PeerMap.accumArray const pmap
-- | List of tests for the PeerMap module.
-testPeerMap =
- [ run prop_PeerMap_addIdempotent
- , run prop_PeerMap_removeIdempotent
- , run prop_PeerMap_maxElem
- , run prop_PeerMap_addFind
- , run prop_PeerMap_findMissing
- ]
+testSuite "PeerMap"
+ [ 'prop_PeerMap_addIdempotent
+ , 'prop_PeerMap_removeIdempotent
+ , 'prop_PeerMap_maxElem
+ , 'prop_PeerMap_addFind
+ , 'prop_PeerMap_findMissing
+ ]
-- ** Container tests
+-- we silence the following due to hlint bug fixed in later versions
+{-# ANN prop_Container_addTwo "HLint: ignore Avoid lambda" #-}
prop_Container_addTwo cdata i1 i2 =
- fn i1 i2 cont == fn i2 i1 cont &&
- fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
+ fn i1 i2 cont == fn i2 i1 cont &&
+ fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
where _types = (cdata::[Int],
i1::Int, i2::Int)
cont = foldl (\c x -> Container.add x x c) Container.empty cdata
prop_Container_nameOf node =
let nl = makeSmallCluster node 1
fnode = head (Container.elems nl)
- in Container.nameOf nl (Node.idx fnode) == Node.name fnode
+ in Container.nameOf nl (Node.idx fnode) ==? Node.name fnode
-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
forAll (vector cnt) $ \ names ->
(length . nub) (map fst names ++ map snd names) ==
length names * 2 &&
- not (othername `elem` (map fst names ++ map snd names)) ==>
+ othername `notElem` (map fst names ++ map snd names) ==>
let nl = makeSmallCluster node cnt
nodes = Container.elems nl
nodes' = map (\((name, alias), nn) -> (Node.idx nn,
target = snd (nodes' !! fidx)
in Container.findByName nl' (Node.name target) == Just target &&
Container.findByName nl' (Node.alias target) == Just target &&
- Container.findByName nl' othername == Nothing
+ isNothing (Container.findByName nl' othername)
-testContainer =
- [ run prop_Container_addTwo
- , run prop_Container_nameOf
- , run prop_Container_findByName
- ]
+testSuite "Container"
+ [ 'prop_Container_addTwo
+ , 'prop_Container_nameOf
+ , 'prop_Container_findByName
+ ]
-- ** Instance tests
-- Simple instance tests, we only have setter/getters
prop_Instance_creat inst =
- Instance.name inst == Instance.alias inst
+ Instance.name inst ==? Instance.alias inst
prop_Instance_setIdx inst idx =
- Instance.idx (Instance.setIdx inst idx) == idx
+ Instance.idx (Instance.setIdx inst idx) ==? idx
where _types = (inst::Instance.Instance, idx::Types.Idx)
prop_Instance_setName inst name =
- Instance.name newinst == name &&
- Instance.alias newinst == name
+ Instance.name newinst == name &&
+ Instance.alias newinst == name
where _types = (inst::Instance.Instance, name::String)
newinst = Instance.setName inst name
prop_Instance_setAlias inst name =
- Instance.name newinst == Instance.name inst &&
- Instance.alias newinst == name
+ Instance.name newinst == Instance.name inst &&
+ Instance.alias newinst == name
where _types = (inst::Instance.Instance, name::String)
newinst = Instance.setAlias inst name
prop_Instance_setPri inst pdx =
- Instance.pNode (Instance.setPri inst pdx) == pdx
+ Instance.pNode (Instance.setPri inst pdx) ==? pdx
where _types = (inst::Instance.Instance, pdx::Types.Ndx)
prop_Instance_setSec inst sdx =
- Instance.sNode (Instance.setSec inst sdx) == sdx
+ Instance.sNode (Instance.setSec inst sdx) ==? sdx
where _types = (inst::Instance.Instance, sdx::Types.Ndx)
prop_Instance_setBoth inst pdx sdx =
- Instance.pNode si == pdx && Instance.sNode si == sdx
+ Instance.pNode si == pdx && Instance.sNode si == sdx
where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
si = Instance.setBoth inst pdx sdx
-prop_Instance_runStatus_True =
- forAll (arbitrary `suchThat`
- ((`elem` Instance.runningStates) . Instance.runSt))
- Instance.running
-
-prop_Instance_runStatus_False inst =
- let run_st = Instance.running inst
- run_tx = Instance.runSt inst
- in
- run_tx `notElem` Instance.runningStates ==> not run_st
-
prop_Instance_shrinkMG inst =
- Instance.mem inst >= 2 * Types.unitMem ==>
- case Instance.shrinkByType inst Types.FailMem of
- Types.Ok inst' ->
- Instance.mem inst' == Instance.mem inst - Types.unitMem
- _ -> False
+ Instance.mem inst >= 2 * Types.unitMem ==>
+ case Instance.shrinkByType inst Types.FailMem of
+ Types.Ok inst' -> Instance.mem inst' == Instance.mem inst - Types.unitMem
+ _ -> False
prop_Instance_shrinkMF inst =
- forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
+ forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
let inst' = inst { Instance.mem = mem}
in Types.isBad $ Instance.shrinkByType inst' Types.FailMem
prop_Instance_shrinkCG inst =
- Instance.vcpus inst >= 2 * Types.unitCpu ==>
- case Instance.shrinkByType inst Types.FailCPU of
- Types.Ok inst' ->
- Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
- _ -> False
+ Instance.vcpus inst >= 2 * Types.unitCpu ==>
+ case Instance.shrinkByType inst Types.FailCPU of
+ Types.Ok inst' ->
+ Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
+ _ -> False
prop_Instance_shrinkCF inst =
- forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
+ forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
let inst' = inst { Instance.vcpus = vcpus }
in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU
prop_Instance_shrinkDG inst =
- Instance.dsk inst >= 2 * Types.unitDsk ==>
- case Instance.shrinkByType inst Types.FailDisk of
- Types.Ok inst' ->
- Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
- _ -> False
+ Instance.dsk inst >= 2 * Types.unitDsk ==>
+ case Instance.shrinkByType inst Types.FailDisk of
+ Types.Ok inst' ->
+ Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
+ _ -> False
prop_Instance_shrinkDF inst =
- forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
+ forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
let inst' = inst { Instance.dsk = dsk }
in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk
prop_Instance_setMovable inst m =
- Instance.movable inst' == m
+ Instance.movable inst' ==? m
where inst' = Instance.setMovable inst m
-testInstance =
- [ run prop_Instance_creat
- , run prop_Instance_setIdx
- , run prop_Instance_setName
- , run prop_Instance_setAlias
- , run prop_Instance_setPri
- , run prop_Instance_setSec
- , run prop_Instance_setBoth
- , run prop_Instance_runStatus_True
- , run prop_Instance_runStatus_False
- , run prop_Instance_shrinkMG
- , run prop_Instance_shrinkMF
- , run prop_Instance_shrinkCG
- , run prop_Instance_shrinkCF
- , run prop_Instance_shrinkDG
- , run prop_Instance_shrinkDF
- , run prop_Instance_setMovable
- ]
+testSuite "Instance"
+ [ 'prop_Instance_creat
+ , 'prop_Instance_setIdx
+ , 'prop_Instance_setName
+ , 'prop_Instance_setAlias
+ , 'prop_Instance_setPri
+ , 'prop_Instance_setSec
+ , 'prop_Instance_setBoth
+ , 'prop_Instance_shrinkMG
+ , 'prop_Instance_shrinkMF
+ , 'prop_Instance_shrinkCG
+ , 'prop_Instance_shrinkCF
+ , 'prop_Instance_shrinkDG
+ , 'prop_Instance_shrinkDF
+ , 'prop_Instance_setMovable
+ ]
-- ** Text backend tests
prop_Text_Load_Instance name mem dsk vcpus status
(NonEmpty pnode) snode
(NonNegative pdx) (NonNegative sdx) autobal dt =
- pnode /= snode && pdx /= sdx ==>
- let vcpus_s = show vcpus
- dsk_s = show dsk
- mem_s = show mem
- ndx = if null snode
+ pnode /= snode && pdx /= sdx ==>
+ let vcpus_s = show vcpus
+ dsk_s = show dsk
+ mem_s = show mem
+ status_s = Types.instanceStatusToRaw status
+ ndx = if null snode
then [(pnode, pdx)]
else [(pnode, pdx), (snode, sdx)]
- nl = Data.Map.fromList ndx
- tags = ""
- sbal = if autobal then "Y" else "N"
- sdt = Types.dtToString dt
- inst = Text.loadInst nl
- [name, mem_s, dsk_s, vcpus_s, status,
- sbal, pnode, snode, sdt, tags]
- fail1 = Text.loadInst nl
- [name, mem_s, dsk_s, vcpus_s, status,
- sbal, pnode, pnode, tags]
- _types = ( name::String, mem::Int, dsk::Int
- , vcpus::Int, status::String
- , snode::String
- , autobal::Bool)
- in
- case inst of
- Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
- False
- Types.Ok (_, i) -> printTestCase ("Mismatch in some field while\
- \ loading the instance") $
- Instance.name i == name &&
- Instance.vcpus i == vcpus &&
- Instance.mem i == mem &&
- Instance.pNode i == pdx &&
- Instance.sNode i == (if null snode
- then Node.noSecondary
- else sdx) &&
- Instance.autoBalance i == autobal &&
- Types.isBad fail1
+ nl = Data.Map.fromList ndx
+ tags = ""
+ sbal = if autobal then "Y" else "N"
+ sdt = Types.diskTemplateToRaw dt
+ inst = Text.loadInst nl
+ [name, mem_s, dsk_s, vcpus_s, status_s,
+ sbal, pnode, snode, sdt, tags]
+ fail1 = Text.loadInst nl
+ [name, mem_s, dsk_s, vcpus_s, status_s,
+ sbal, pnode, pnode, tags]
+ _types = ( name::String, mem::Int, dsk::Int
+ , vcpus::Int, status::Types.InstanceStatus
+ , snode::String
+ , autobal::Bool)
+ in case inst of
+ Types.Bad msg -> failTest $ "Failed to load instance: " ++ msg
+ Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
+ \ loading the instance" $
+ Instance.name i == name &&
+ Instance.vcpus i == vcpus &&
+ Instance.mem i == mem &&
+ Instance.pNode i == pdx &&
+ Instance.sNode i == (if null snode
+ then Node.noSecondary
+ else sdx) &&
+ Instance.autoBalance i == autobal &&
+ Types.isBad fail1
prop_Text_Load_InstanceFail ktn fields =
- length fields /= 10 ==>
+ length fields /= 10 ==>
case Text.loadInst nl fields of
- Types.Ok _ -> printTestCase "Managed to load instance from invalid\
- \ data" False
+ Types.Ok _ -> failTest "Managed to load instance from invalid data"
Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
"Invalid/incomplete instance data: '" `isPrefixOf` msg
where nl = Data.Map.fromList ktn
prop_Text_Load_Node name tm nm fm td fd tc fo =
- let conv v = if v < 0
- then "?"
- else show v
- tm_s = conv tm
- nm_s = conv nm
- fm_s = conv fm
- td_s = conv td
- fd_s = conv fd
- tc_s = conv tc
- fo_s = if fo
+ let conv v = if v < 0
+ then "?"
+ else show v
+ tm_s = conv tm
+ nm_s = conv nm
+ fm_s = conv fm
+ td_s = conv td
+ fd_s = conv fd
+ tc_s = conv tc
+ fo_s = if fo
then "Y"
else "N"
- any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
- gid = Group.uuid defGroup
- in case Text.loadNode defGroupAssoc
- [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
- Nothing -> False
- Just (name', node) ->
- if fo || any_broken
- then Node.offline node
- else Node.name node == name' && name' == name &&
- Node.alias node == name &&
- Node.tMem node == fromIntegral tm &&
- Node.nMem node == nm &&
- Node.fMem node == fm &&
- Node.tDsk node == fromIntegral td &&
- Node.fDsk node == fd &&
- Node.tCpu node == fromIntegral tc
+ any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
+ gid = Group.uuid defGroup
+ in case Text.loadNode defGroupAssoc
+ [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
+ Nothing -> False
+ Just (name', node) ->
+ if fo || any_broken
+ then Node.offline node
+ else Node.name node == name' && name' == name &&
+ Node.alias node == name &&
+ Node.tMem node == fromIntegral tm &&
+ Node.nMem node == nm &&
+ Node.fMem node == fm &&
+ Node.tDsk node == fromIntegral td &&
+ Node.fDsk node == fd &&
+ Node.tCpu node == fromIntegral tc
prop_Text_Load_NodeFail fields =
- length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
+ length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
prop_Text_NodeLSIdempotent node =
- (Text.loadNode defGroupAssoc.
- Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
- Just (Node.name n, n)
+ (Text.loadNode defGroupAssoc.
+ Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
+ Just (Node.name n, n)
-- override failN1 to what loadNode returns by default
- where n = node { Node.failN1 = True, Node.offline = False }
-
-testText =
- [ run prop_Text_Load_Instance
- , run prop_Text_Load_InstanceFail
- , run prop_Text_Load_Node
- , run prop_Text_Load_NodeFail
- , run prop_Text_NodeLSIdempotent
- ]
+ where n = node { Node.failN1 = True, Node.offline = False
+ , Node.iPolicy = Types.defIPolicy }
+
+prop_Text_ISpecIdempotent ispec =
+ case Text.loadISpec "dummy" . Utils.sepSplit ',' .
+ Text.serializeISpec $ ispec of
+ Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
+ Types.Ok ispec' -> ispec ==? ispec'
+
+prop_Text_IPolicyIdempotent ipol =
+ case Text.loadIPolicy . Utils.sepSplit '|' $
+ Text.serializeIPolicy owner ipol of
+ Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
+ Types.Ok res -> (owner, ipol) ==? res
+ where owner = "dummy"
+
+-- | This property, while being in the text tests, does more than just
+-- test end-to-end the serialisation and loading back workflow; it
+-- also tests the Loader.mergeData and the actual
+-- Cluster.iterateAlloc (for well-behaving w.r.t. instance
+-- allocations, not for the business logic). As such, it's a quite
+-- complex and slow test, and that's the reason we restrict it to
+-- small cluster sizes.
+prop_Text_CreateSerialise =
+ forAll genTags $ \ctags ->
+ forAll (choose (1, 2)) $ \reqnodes ->
+ forAll (choose (1, 20)) $ \maxiter ->
+ forAll (choose (2, 10)) $ \count ->
+ forAll genOnlineNode $ \node ->
+ forAll (arbitrary `suchThat` isInstanceSmallerThanNode node) $ \inst ->
+ let inst' = Instance.setMovable inst $ Utils.if' (reqnodes == 2) True False
+ nl = makeSmallCluster node count
+ in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
+ Cluster.iterateAlloc nl Container.empty (Just maxiter) inst' allocn [] []
+ of
+ Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
+ Types.Ok (_, _, _, [], _) -> printTestCase
+ "Failed to allocate: no allocations" False
+ Types.Ok (_, nl', il', _, _) ->
+ let cdata = Loader.ClusterData defGroupList nl' il' ctags
+ Types.defIPolicy
+ saved = Text.serializeCluster cdata
+ in case Text.parseData saved >>= Loader.mergeData [] [] [] [] of
+ Types.Bad msg -> failTest $ "Failed to load/merge: " ++ msg
+ Types.Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
+ ctags ==? ctags2 .&&.
+ Types.defIPolicy ==? cpol2 .&&.
+ il' ==? il2 .&&.
+ defGroupList ==? gl2 .&&.
+ nl' ==? nl2
+
+testSuite "Text"
+ [ 'prop_Text_Load_Instance
+ , 'prop_Text_Load_InstanceFail
+ , 'prop_Text_Load_Node
+ , 'prop_Text_Load_NodeFail
+ , 'prop_Text_NodeLSIdempotent
+ , 'prop_Text_ISpecIdempotent
+ , 'prop_Text_IPolicyIdempotent
+ , 'prop_Text_CreateSerialise
+ ]
-- ** Node tests
prop_Node_setAlias node name =
- Node.name newnode == Node.name node &&
- Node.alias newnode == name
+ Node.name newnode == Node.name node &&
+ Node.alias newnode == name
where _types = (node::Node.Node, name::String)
newnode = Node.setAlias node name
prop_Node_setOffline node status =
- Node.offline newnode == status
+ Node.offline newnode ==? status
where newnode = Node.setOffline node status
prop_Node_setXmem node xm =
- Node.xMem newnode == xm
+ Node.xMem newnode ==? xm
where newnode = Node.setXmem node xm
prop_Node_setMcpu node mc =
- Node.mCpu newnode == mc
+ Node.mCpu newnode ==? mc
where newnode = Node.setMcpu node mc
-- | Check that an instance add with too high memory or disk will be
-- rejected.
-prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
- not (Node.failN1 node)
- ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailMem -> True
- _ -> False
- where _types = (node::Node.Node, inst::Instance.Instance)
- inst' = setInstanceSmallerThanNode node inst
- inst'' = inst' { Instance.mem = Instance.mem inst }
-
-prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
- not (Node.failN1 node)
- ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailDisk -> True
- _ -> False
+prop_Node_addPriFM node inst =
+ Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
+ not (Instance.instanceOffline inst) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailMem -> True
+ _ -> False
+ where _types = (node::Node.Node, inst::Instance.Instance)
+ inst' = setInstanceSmallerThanNode node inst
+ inst'' = inst' { Instance.mem = Instance.mem inst }
+
+prop_Node_addPriFD node inst =
+ Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailDisk -> True
+ _ -> False
where _types = (node::Node.Node, inst::Instance.Instance)
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.dsk = Instance.dsk inst }
prop_Node_addPriFC node inst (Positive extra) =
- not (Node.failN1 node) ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailCPU -> True
- _ -> False
+ not (Node.failN1 node) && not (Instance.instanceOffline inst) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailCPU -> True
+ _ -> False
where _types = (node::Node.Node, inst::Instance.Instance)
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addSec node inst pdx =
- (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
- Instance.dsk inst >= Node.fDsk node) &&
- not (Node.failN1 node)
- ==> isFailure (Node.addSec node inst pdx)
+ ((Instance.mem inst >= (Node.fMem node - Node.rMem node) &&
+ not (Instance.instanceOffline inst)) ||
+ Instance.dsk inst >= Node.fDsk node) &&
+ not (Node.failN1 node) ==>
+ isFailure (Node.addSec node inst pdx)
where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
+-- | Check that an offline instance with reasonable disk size can always
+-- be added.
+prop_Node_addPriOffline =
+ forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
+ forAll (arbitrary `suchThat`
+ (\ x -> (Instance.dsk x < Node.fDsk node) &&
+ Instance.instanceOffline x)) $ \inst ->
+ case Node.addPri node inst of
+ Types.OpGood _ -> True
+ _ -> False
+
+prop_Node_addSecOffline pdx =
+ forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
+ forAll (arbitrary `suchThat`
+ (\ x -> (Instance.dsk x < Node.fDsk node) &&
+ Instance.instanceOffline x)) $ \inst ->
+ case Node.addSec node inst pdx of
+ Types.OpGood _ -> True
+ _ -> False
+
-- | Checks for memory reservation changes.
prop_Node_rMem inst =
- forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
- -- ab = auto_balance, nb = non-auto_balance
- -- we use -1 as the primary node of the instance
- let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
- inst_ab = setInstanceSmallerThanNode node inst'
- inst_nb = inst_ab { Instance.autoBalance = False }
- -- now we have the two instances, identical except the
- -- autoBalance attribute
- orig_rmem = Node.rMem node
- inst_idx = Instance.idx inst_ab
- node_add_ab = Node.addSec node inst_ab (-1)
- node_add_nb = Node.addSec node inst_nb (-1)
- node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
- node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
- in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
- (Types.OpGood a_ab, Types.OpGood a_nb,
- Types.OpGood d_ab, Types.OpGood d_nb) ->
- printTestCase "Consistency checks failed" $
- Node.rMem a_ab > orig_rmem &&
- Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
- Node.rMem a_nb == orig_rmem &&
- Node.rMem d_ab == orig_rmem &&
- Node.rMem d_nb == orig_rmem &&
- -- this is not related to rMem, but as good a place to
- -- test as any
- inst_idx `elem` Node.sList a_ab &&
- not (inst_idx `elem` Node.sList d_ab)
- x -> printTestCase ("Failed to add/remove instances: " ++ show x)
- False
+ not (Instance.instanceOffline inst) ==>
+ forAll (arbitrary `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
+ -- ab = auto_balance, nb = non-auto_balance
+ -- we use -1 as the primary node of the instance
+ let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
+ inst_ab = setInstanceSmallerThanNode node inst'
+ inst_nb = inst_ab { Instance.autoBalance = False }
+ -- now we have the two instances, identical except the
+ -- autoBalance attribute
+ orig_rmem = Node.rMem node
+ inst_idx = Instance.idx inst_ab
+ node_add_ab = Node.addSec node inst_ab (-1)
+ node_add_nb = Node.addSec node inst_nb (-1)
+ node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
+ node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
+ in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
+ (Types.OpGood a_ab, Types.OpGood a_nb,
+ Types.OpGood d_ab, Types.OpGood d_nb) ->
+ printTestCase "Consistency checks failed" $
+ Node.rMem a_ab > orig_rmem &&
+ Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
+ Node.rMem a_nb == orig_rmem &&
+ Node.rMem d_ab == orig_rmem &&
+ Node.rMem d_nb == orig_rmem &&
+ -- this is not related to rMem, but as good a place to
+ -- test as any
+ inst_idx `elem` Node.sList a_ab &&
+ inst_idx `notElem` Node.sList d_ab
+ x -> failTest $ "Failed to add/remove instances: " ++ show x
-- | Check mdsk setting.
prop_Node_setMdsk node mx =
- Node.loDsk node' >= 0 &&
- fromIntegral (Node.loDsk node') <= Node.tDsk node &&
- Node.availDisk node' >= 0 &&
- Node.availDisk node' <= Node.fDsk node' &&
- fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
- Node.mDsk node' == mx'
+ Node.loDsk node' >= 0 &&
+ fromIntegral (Node.loDsk node') <= Node.tDsk node &&
+ Node.availDisk node' >= 0 &&
+ Node.availDisk node' <= Node.fDsk node' &&
+ fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
+ Node.mDsk node' == mx'
where _types = (node::Node.Node, mx::SmallRatio)
node' = Node.setMdsk node mx'
SmallRatio mx' = mx
-- Check tag maps
prop_Node_tagMaps_idempotent tags =
- Node.delTags (Node.addTags m tags) tags == m
+ Node.delTags (Node.addTags m tags) tags ==? m
where m = Data.Map.empty
prop_Node_tagMaps_reject tags =
- not (null tags) ==>
- any (\t -> Node.rejectAddTags m [t]) tags
+ not (null tags) ==>
+ all (\t -> Node.rejectAddTags m [t]) tags
where m = Node.addTags Data.Map.empty tags
prop_Node_showField node =
fst (Node.showHeader field) /= Types.unknownField &&
Node.showField node field /= Types.unknownField
-
prop_Node_computeGroups nodes =
let ng = Node.computeGroups nodes
onlyuuid = map fst ng
length (nub onlyuuid) == length onlyuuid &&
(null nodes || not (null ng))
-testNode =
- [ run prop_Node_setAlias
- , run prop_Node_setOffline
- , run prop_Node_setMcpu
- , run prop_Node_setXmem
- , run prop_Node_addPriFM
- , run prop_Node_addPriFD
- , run prop_Node_addPriFC
- , run prop_Node_addSec
- , run prop_Node_rMem
- , run prop_Node_setMdsk
- , run prop_Node_tagMaps_idempotent
- , run prop_Node_tagMaps_reject
- , run prop_Node_showField
- , run prop_Node_computeGroups
- ]
-
+testSuite "Node"
+ [ 'prop_Node_setAlias
+ , 'prop_Node_setOffline
+ , 'prop_Node_setMcpu
+ , 'prop_Node_setXmem
+ , 'prop_Node_addPriFM
+ , 'prop_Node_addPriFD
+ , 'prop_Node_addPriFC
+ , 'prop_Node_addSec
+ , 'prop_Node_addPriOffline
+ , 'prop_Node_addSecOffline
+ , 'prop_Node_rMem
+ , 'prop_Node_setMdsk
+ , 'prop_Node_tagMaps_idempotent
+ , 'prop_Node_tagMaps_reject
+ , 'prop_Node_showField
+ , 'prop_Node_computeGroups
+ ]
-- ** Cluster tests
-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
- forAll (choose (1, 1024)) $ \count ->
+ forAll (choose (1, 1024)) $ \count ->
(not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
(Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
- let fn = Node.buildPeers node Container.empty
- nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
- nl = Container.fromList nlst
- score = Cluster.compCV nl
- -- we can't say == 0 here as the floating point errors accumulate;
- -- this should be much lower than the default score in CLI.hs
- in score <= 1e-12
+ let fn = Node.buildPeers node Container.empty
+ nlst = replicate count fn
+ score = Cluster.compCVNodes nlst
+ -- we can't say == 0 here as the floating point errors accumulate;
+ -- this should be much lower than the default score in CLI.hs
+ in score <= 1e-12
-- | Check that cluster stats are sane.
-prop_CStats_sane node =
- forAll (choose (1, 1024)) $ \count ->
- (not (Node.offline node) && not (Node.failN1 node) &&
- (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
- let fn = Node.buildPeers node Container.empty
- nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
- nl = Container.fromList nlst
- cstats = Cluster.totalResources nl
- in Cluster.csAdsk cstats >= 0 &&
- Cluster.csAdsk cstats <= Cluster.csFdsk cstats
+prop_CStats_sane =
+ forAll (choose (1, 1024)) $ \count ->
+ forAll genOnlineNode $ \node ->
+ let fn = Node.buildPeers node Container.empty
+ nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
+ nl = Container.fromList nlst
+ cstats = Cluster.totalResources nl
+ in Cluster.csAdsk cstats >= 0 &&
+ Cluster.csAdsk cstats <= Cluster.csFdsk cstats
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
-prop_ClusterAlloc_sane node inst =
- forAll (choose (5, 20)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && Node.availDisk node > 0
- && Node.availMem node > 0
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
- Cluster.tryAlloc nl il inst' of
- Types.Bad _ -> False
- Types.Ok as ->
- case Cluster.asSolutions as of
- [] -> False
- (xnl, xi, _, cv):[] ->
- let il' = Container.add (Instance.idx xi) xi il
- tbl = Cluster.Table xnl il' cv []
- in not (canBalance tbl True True False)
- _ -> False
+prop_ClusterAlloc_sane inst =
+ forAll (choose (5, 20)) $ \count ->
+ forAll genOnlineNode $ \node ->
+ let (nl, il, inst') = makeSmallEmptyCluster node count inst
+ in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ Cluster.tryAlloc nl il inst' of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just (xnl, xi, _, cv) ->
+ let il' = Container.add (Instance.idx xi) xi il
+ tbl = Cluster.Table xnl il' cv []
+ in not (canBalance tbl True True False)
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
-prop_ClusterCanTieredAlloc node inst =
- forAll (choose (2, 5)) $ \count ->
- forAll (choose (1, 2)) $ \rqnodes ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
- in case allocnodes >>= \allocnodes' ->
- Cluster.tieredAlloc nl il inst allocnodes' [] [] of
- Types.Bad _ -> False
- Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
- IntMap.size il' == length ixes &&
- length ixes == length cstats
+prop_ClusterCanTieredAlloc inst =
+ forAll (choose (2, 5)) $ \count ->
+ forAll (choose (1, 2)) $ \rqnodes ->
+ forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
+ let nl = makeSmallCluster node count
+ il = Container.empty
+ allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
+ in case allocnodes >>= \allocnodes' ->
+ Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
+ Types.Bad _ -> False
+ Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
+ IntMap.size il' == length ixes &&
+ length ixes == length cstats
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
-prop_ClusterAllocEvac node inst =
- forAll (choose (4, 8)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
- Cluster.tryAlloc nl il inst' of
- Types.Bad _ -> False
- Types.Ok as ->
- case Cluster.asSolutions as of
- [] -> False
- (xnl, xi, _, _):[] ->
- let sdx = Instance.sNode xi
- il' = Container.add (Instance.idx xi) xi il
- in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
- Just _ -> True
- _ -> False
- _ -> False
+prop_ClusterAllocEvac inst =
+ forAll (choose (4, 8)) $ \count ->
+ forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
+ let (nl, il, inst') = makeSmallEmptyCluster node count inst
+ in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ Cluster.tryAlloc nl il inst' of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just (xnl, xi, _, _) ->
+ let sdx = Instance.sNode xi
+ il' = Container.add (Instance.idx xi) xi il
+ in case IAlloc.processRelocate defGroupList xnl il'
+ (Instance.idx xi) 1 [sdx] of
+ Types.Ok _ -> True
+ _ -> False
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
- forAll (genNode (Just 5) (Just 128)) $ \node ->
- forAll (choose (3, 5)) $ \count ->
- not (Node.offline node) && not (Node.failN1 node) ==>
- let nl = makeSmallCluster node count
- (hnode, nl') = IntMap.deleteFindMax nl
- il = Container.empty
- allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
- i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
- in case allocnodes >>= \allocnodes' ->
- Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
- Types.Bad _ -> False
- Types.Ok (_, xnl, il', _, _) ->
- let ynl = Container.add (Node.idx hnode) hnode xnl
- cv = Cluster.compCV ynl
- tbl = Cluster.Table ynl il' cv []
- in canBalance tbl True True False
+ forAll (genNode (Just 5) (Just 128)) $ \node ->
+ forAll (choose (3, 5)) $ \count ->
+ not (Node.offline node) && not (Node.failN1 node) ==>
+ let nl = makeSmallCluster node count
+ (hnode, nl') = IntMap.deleteFindMax nl
+ il = Container.empty
+ allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
+ i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
+ in case allocnodes >>= \allocnodes' ->
+ Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
+ Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
+ Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no instances"
+ Types.Ok (_, xnl, il', _, _) ->
+ let ynl = Container.add (Node.idx hnode) hnode xnl
+ cv = Cluster.compCV ynl
+ tbl = Cluster.Table ynl il' cv []
+ in printTestCase "Failed to rebalance" $
+ canBalance tbl True True False
-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
(Container.elems nl'')) gni
-testCluster =
- [ run prop_Score_Zero
- , run prop_CStats_sane
- , run prop_ClusterAlloc_sane
- , run prop_ClusterCanTieredAlloc
- , run prop_ClusterAllocEvac
- , run prop_ClusterAllocBalance
- , run prop_ClusterCheckConsistency
- , run prop_ClusterSplitCluster
- ]
+-- | Helper function to check if we can allocate an instance on a
+-- given node list.
+canAllocOn :: Node.List -> Int -> Instance.Instance -> Bool
+canAllocOn nl reqnodes inst =
+ case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
+ Cluster.tryAlloc nl (Container.empty) inst of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just _ -> True
+
+-- | Checks that allocation obeys minimum and maximum instance
+-- policies. The unittest generates a random node, duplicates it count
+-- times, and generates a random instance that can be allocated on
+-- this mini-cluster; it then checks that after applying a policy that
+-- the instance doesn't fit, the allocation fails.
+prop_ClusterAllocPolicy node =
+ -- rqn is the required nodes (1 or 2)
+ forAll (choose (1, 2)) $ \rqn ->
+ forAll (choose (5, 20)) $ \count ->
+ forAll (arbitrary `suchThat` (canAllocOn (makeSmallCluster node count) rqn))
+ $ \inst ->
+ forAll (arbitrary `suchThat` (isFailure .
+ Instance.instMatchesPolicy inst)) $ \ipol ->
+ let node' = Node.setPolicy ipol node
+ nl = makeSmallCluster node' count
+ in not $ canAllocOn nl rqn inst
+
+testSuite "Cluster"
+ [ 'prop_Score_Zero
+ , 'prop_CStats_sane
+ , 'prop_ClusterAlloc_sane
+ , 'prop_ClusterCanTieredAlloc
+ , 'prop_ClusterAllocEvac
+ , 'prop_ClusterAllocBalance
+ , 'prop_ClusterCheckConsistency
+ , 'prop_ClusterSplitCluster
+ , 'prop_ClusterAllocPolicy
+ ]
-- ** OpCodes tests
-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
case J.readJSON (J.showJSON op) of
- J.Error _ -> False
- J.Ok op' -> op == op'
+ J.Error e -> failTest $ "Cannot deserialise: " ++ e
+ J.Ok op' -> op ==? op'
where _types = op::OpCodes.OpCode
-testOpCodes =
- [ run prop_OpCodes_serialization
- ]
+testSuite "OpCodes"
+ [ 'prop_OpCodes_serialization ]
-- ** Jobs tests
-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
case J.readJSON (J.showJSON os) of
- J.Error _ -> False
- J.Ok os' -> os == os'
+ J.Error e -> failTest $ "Cannot deserialise: " ++ e
+ J.Ok os' -> os ==? os'
where _types = os::Jobs.OpStatus
prop_JobStatus_serialization js =
case J.readJSON (J.showJSON js) of
- J.Error _ -> False
- J.Ok js' -> js == js'
+ J.Error e -> failTest $ "Cannot deserialise: " ++ e
+ J.Ok js' -> js ==? js'
where _types = js::Jobs.JobStatus
-testJobs =
- [ run prop_OpStatus_serialization
- , run prop_JobStatus_serialization
- ]
+testSuite "Jobs"
+ [ 'prop_OpStatus_serialization
+ , 'prop_JobStatus_serialization
+ ]
-- ** Loader tests
prop_Loader_lookupNode ktn inst node =
- Loader.lookupNode nl inst node == Data.Map.lookup node nl
- where nl = Data.Map.fromList ktn
+ Loader.lookupNode nl inst node ==? Data.Map.lookup node nl
+ where nl = Data.Map.fromList ktn
prop_Loader_lookupInstance kti inst =
- Loader.lookupInstance il inst == Data.Map.lookup inst il
- where il = Data.Map.fromList kti
+ Loader.lookupInstance il inst ==? Data.Map.lookup inst il
+ where il = Data.Map.fromList kti
prop_Loader_assignIndices nodes =
Data.Map.size nassoc == length nodes &&
(if not (null nodes)
then maximum (IntMap.keys kt) == length nodes - 1
else True)
- where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
+ where (nassoc, kt) =
+ Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
in case Loader.mergeData [] [] [] []
(Loader.emptyCluster {Loader.cdNodes = na}) of
Types.Bad _ -> False
- Types.Ok (Loader.ClusterData _ nl il _) ->
+ Types.Ok (Loader.ClusterData _ nl il _ _) ->
let nodes = Container.elems nl
instances = Container.elems il
in (sum . map (length . Node.pList)) nodes == 0 &&
null instances
-testLoader =
- [ run prop_Loader_lookupNode
- , run prop_Loader_lookupInstance
- , run prop_Loader_assignIndices
- , run prop_Loader_mergeData
- ]
+-- | Check that compareNameComponent on equal strings works.
+prop_Loader_compareNameComponent_equal :: String -> Bool
+prop_Loader_compareNameComponent_equal s =
+ Loader.compareNameComponent s s ==
+ Loader.LookupResult Loader.ExactMatch s
+
+-- | Check that compareNameComponent on prefix strings works.
+prop_Loader_compareNameComponent_prefix :: NonEmptyList Char -> String -> Bool
+prop_Loader_compareNameComponent_prefix (NonEmpty s1) s2 =
+ Loader.compareNameComponent (s1 ++ "." ++ s2) s1 ==
+ Loader.LookupResult Loader.PartialMatch s1
+
+testSuite "Loader"
+ [ 'prop_Loader_lookupNode
+ , 'prop_Loader_lookupInstance
+ , 'prop_Loader_assignIndices
+ , 'prop_Loader_mergeData
+ , 'prop_Loader_compareNameComponent_equal
+ , 'prop_Loader_compareNameComponent_prefix
+ ]
-- ** Types tests
-prop_AllocPolicy_serialisation apol =
- case Types.apolFromString (Types.apolToString apol) of
- Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == apol
- Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
-
-prop_DiskTemplate_serialisation dt =
- case Types.dtFromString (Types.dtToString dt) of
- Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == dt
- Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
-
-testTypes =
- [ run prop_AllocPolicy_serialisation
- , run prop_DiskTemplate_serialisation
- ]
+prop_Types_AllocPolicy_serialisation apol =
+ case J.readJSON (J.showJSON apol) of
+ J.Ok p -> p ==? apol
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = apol::Types.AllocPolicy
+
+prop_Types_DiskTemplate_serialisation dt =
+ case J.readJSON (J.showJSON dt) of
+ J.Ok p -> p ==? dt
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = dt::Types.DiskTemplate
+
+prop_Types_ISpec_serialisation ispec =
+ case J.readJSON (J.showJSON ispec) of
+ J.Ok p -> p ==? ispec
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = ispec::Types.ISpec
+
+prop_Types_IPolicy_serialisation ipol =
+ case J.readJSON (J.showJSON ipol) of
+ J.Ok p -> p ==? ipol
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = ipol::Types.IPolicy
+
+prop_Types_EvacMode_serialisation em =
+ case J.readJSON (J.showJSON em) of
+ J.Ok p -> p ==? em
+ J.Error s -> failTest $ "Failed to deserialise: " ++ s
+ where _types = em::Types.EvacMode
+
+prop_Types_opToResult op =
+ case op of
+ Types.OpFail _ -> Types.isBad r
+ Types.OpGood v -> case r of
+ Types.Bad _ -> False
+ Types.Ok v' -> v == v'
+ where r = Types.opToResult op
+ _types = op::Types.OpResult Int
+
+prop_Types_eitherToResult ei =
+ case ei of
+ Left _ -> Types.isBad r
+ Right v -> case r of
+ Types.Bad _ -> False
+ Types.Ok v' -> v == v'
+ where r = Types.eitherToResult ei
+ _types = ei::Either String Int
+
+testSuite "Types"
+ [ 'prop_Types_AllocPolicy_serialisation
+ , 'prop_Types_DiskTemplate_serialisation
+ , 'prop_Types_ISpec_serialisation
+ , 'prop_Types_IPolicy_serialisation
+ , 'prop_Types_EvacMode_serialisation
+ , 'prop_Types_opToResult
+ , 'prop_Types_eitherToResult
+ ]