1 {-| Unittests for ganeti-htools
7 Copyright (C) 2009, 2010, 2011 Google Inc.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 module Ganeti.HTools.QC
39 import Test.QuickCheck
40 import Test.QuickCheck.Batch
41 import Data.List (findIndex, intercalate, nub, isPrefixOf)
44 import qualified Text.JSON as J
45 import qualified Data.Map
46 import qualified Data.IntMap as IntMap
47 import qualified Ganeti.OpCodes as OpCodes
48 import qualified Ganeti.Jobs as Jobs
49 import qualified Ganeti.Luxi
50 import qualified Ganeti.HTools.CLI as CLI
51 import qualified Ganeti.HTools.Cluster as Cluster
52 import qualified Ganeti.HTools.Container as Container
53 import qualified Ganeti.HTools.ExtLoader
54 import qualified Ganeti.HTools.IAlloc as IAlloc
55 import qualified Ganeti.HTools.Instance as Instance
56 import qualified Ganeti.HTools.Loader as Loader
57 import qualified Ganeti.HTools.Luxi
58 import qualified Ganeti.HTools.Node as Node
59 import qualified Ganeti.HTools.Group as Group
60 import qualified Ganeti.HTools.PeerMap as PeerMap
61 import qualified Ganeti.HTools.Rapi
62 import qualified Ganeti.HTools.Simu
63 import qualified Ganeti.HTools.Text as Text
64 import qualified Ganeti.HTools.Types as Types
65 import qualified Ganeti.HTools.Utils as Utils
66 import qualified Ganeti.HTools.Version
67 import qualified Ganeti.Constants as C
-- | Maximum memory (1TiB, somewhat random value)

-- | Maximum disk (8TiB, somewhat random value)
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)

-- | The group used as the default for the properties below.
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
               Group.create "default" Utils.defaultGroupID

-- | Singleton group list containing only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Map from the default group's UUID to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- | Simple checker for whether OpResult is fail or pass
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True

-- | Update an instance to be smaller than a node:
-- each resource is set to half of what the node has available.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2

-- | Create an instance given its spec
-- (running, no tags; (-1) node indices mean not-yet-assigned)
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)

-- | Create a small cluster by repeating a node spec
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
-- | Tells whether a node has more than @size@ allocation units of
-- disk, memory and CPU available.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]

-- | Whether a balancing move can be found for the given table.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac =
    isJust (Cluster.tryBalance tbl dm im evac 0 0)
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      -- next free instance index: one past the current maximum
      maxiidx = if Container.null il
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      -- register the instance on its primary and secondary nodes
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
-- * Arbitrary instances

-- copied from the introduction to quickcheck
-- NOTE(review): presumably needed because the QuickCheck version in
-- use provides no Arbitrary Char instance -- confirm
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

-- | A single character that is valid inside a DNS name component.
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
        x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")

-- | Generate one DNS name component.
getName :: Gen String
    dn <- vector n::Gen [DNSChar]
    return (map dnsGetChar dn)

-- | Generate a dotted name of two to five components.
getFQDN :: Gen String
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')

-- let's generate a random instance
instance Arbitrary Instance.Instance where
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements [ C.inststErrorup
                         , C.inststNodeoffline
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn

-- | Generate a random node with internally consistent resources
-- (free memory/disk never exceed the totals).
instance Arbitrary Node.Node where
      mem_t <- choose (0, maxMem)
      mem_f <- choose (0, mem_t)
      mem_n <- choose (0, mem_t - mem_f)
      dsk_t <- choose (0, maxDsk)
      dsk_f <- choose (0, dsk_t)
      cpu_t <- choose (0, maxCpu)
      let n = Node.create name (fromIntegral mem_t) mem_n mem_f
              (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
          n' = Node.buildPeers n Container.empty

instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto

-- | Generate one of a fixed set of opcodes, with arbitrary fields.
instance Arbitrary OpCodes.OpCode where
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
      -- the op_id list above and this dispatch must stay in sync
      _ -> fail "Wrong opcode")
-- | Any of the defined opcode statuses.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements (enumFromTo minBound maxBound)

-- | Any of the defined job statuses.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements (enumFromTo minBound maxBound)
-- If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent
prop_Utils_commaJoinSplit lst =
    lst /= [""] && all (notElem ',') lst ==>
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- Split and join should always be idempotent
prop_Utils_commaSplitJoin s =
    (Utils.commaJoin . Utils.sepSplit ',') s == s

-- | fromObjWithDefault, we test using the Maybe monad and an integer
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key must yield the supplied default...
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- ...while a present key is returned as-is, ignoring the default
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
        random_key (def_value + 1) == Just def_value
  where _types = def_value :: Integer
-- Batch runner entries for the Utils properties.
[ run prop_Utils_commaJoinSplit
, run prop_Utils_commaSplitJoin
, run prop_Utils_fromObjWithDefault
-- | Make sure add is idempotent
prop_PeerMap_addIdempotent pmap key em =
    fn (fn puniq) == fn puniq
  where _types = (pmap::PeerMap.PeerMap,
                  key::PeerMap.Key, em::PeerMap.Elem)
        fn = PeerMap.add key em
        puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent
prop_PeerMap_removeIdempotent pmap key =
    fn (fn puniq) == fn puniq
  where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
        fn = PeerMap.remove key
        puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0
prop_PeerMap_findMissing pmap key =
    0 == PeerMap.find key (PeerMap.remove key puniq)
  where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
        puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found
prop_PeerMap_addFind pmap key em =
    em == PeerMap.find key (PeerMap.add key em puniq)
  where _types = (pmap::PeerMap.PeerMap,
                  key::PeerMap.Key, em::PeerMap.Elem)
        puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq ==
        if null puniq then 0 else maximum (map snd puniq)
  where _types = pmap::PeerMap.PeerMap
        puniq = PeerMap.accumArray const pmap
-- Batch runner entries for the PeerMap properties.
[ run prop_PeerMap_addIdempotent
, run prop_PeerMap_removeIdempotent
, run prop_PeerMap_maxElem
, run prop_PeerMap_addFind
, run prop_PeerMap_findMissing

-- | addTwo is insertion-order independent and idempotent
-- (on this self-keyed test container).
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
    fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2
-- | nameOf on a freshly-built cluster returns the node's own name.
prop_Container_nameOf node =
    Container.nameOf nl (Node.idx fnode) == Node.name fnode
  where nl = makeSmallCluster node 1
        fnode = head (Container.elems nl)
-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- rename each node with its generated (name, alias) pair
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

-- Batch runner entries for the Container properties.
[ run prop_Container_addTwo
, run prop_Container_nameOf
, run prop_Container_findByName
-- Simple instance tests, we only have setter/getters

-- | A freshly-created instance carries its name as its alias.
prop_Instance_creat inst =
    Instance.alias inst == Instance.name inst

-- | setIdx stores the given index.
prop_Instance_setIdx inst idx =
    idx == Instance.idx newinst
  where _types = (inst::Instance.Instance, idx::Types.Idx)
        newinst = Instance.setIdx inst idx

-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    all (== name) [Instance.name inst', Instance.alias inst']
  where _types = (inst::Instance.Instance, name::String)
        inst' = Instance.setName inst name

-- | setAlias changes only the alias, leaving the name untouched.
prop_Instance_setAlias inst name =
    Instance.alias inst' == name &&
    Instance.name inst' == Instance.name inst
  where _types = (inst::Instance.Instance, name::String)
        inst' = Instance.setAlias inst name

-- | setPri stores the given primary node index.
prop_Instance_setPri inst pdx =
    pdx == Instance.pNode (Instance.setPri inst pdx)
  where _types = (inst::Instance.Instance, pdx::Types.Ndx)

-- | setSec stores the given secondary node index.
prop_Instance_setSec inst sdx =
    sdx == Instance.sNode (Instance.setSec inst sdx)
  where _types = (inst::Instance.Instance, sdx::Types.Ndx)

-- | setBoth stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    (Instance.pNode si, Instance.sNode si) == (pdx, sdx)
  where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
        si = Instance.setBoth inst pdx sdx
-- | A run state in runningStates implies the running flag.
prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
       run_tx `elem` Instance.runningStates ==> run_st

-- | A run state outside runningStates implies not running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
       run_tx `notElem` Instance.runningStates ==> not run_st

-- | Shrinking memory on a big-enough instance subtracts one memory unit.
prop_Instance_shrinkMG inst =
  Instance.mem inst >= 2 * Types.unitMem ==>
    case Instance.shrinkByType inst Types.FailMem of
      Instance.mem inst' == Instance.mem inst - Types.unitMem
-- | Shrinking memory on an already-small instance must fail.
prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
        Types.isBad (Instance.shrinkByType inst Types.FailMem)
-- | Shrinking CPUs on a big-enough instance subtracts one CPU unit.
prop_Instance_shrinkCG inst =
  Instance.vcpus inst >= 2 * Types.unitCpu ==>
    case Instance.shrinkByType inst Types.FailCPU of
      Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
-- | Shrinking CPUs on an already-small instance must fail.
prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
        Types.isBad (Instance.shrinkByType inst Types.FailCPU)
-- | Shrinking disk on a big-enough instance subtracts one disk unit.
prop_Instance_shrinkDG inst =
  Instance.dsk inst >= 2 * Types.unitDsk ==>
    case Instance.shrinkByType inst Types.FailDisk of
      Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
-- | Shrinking disk on an already-small instance must fail.
prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
        Types.isBad (Instance.shrinkByType inst Types.FailDisk)
-- | setMovable stores the given movability flag.
prop_Instance_setMovable inst m =
    m == Instance.movable (Instance.setMovable inst m)
-- Batch runner entries for the Instance properties.
[ run prop_Instance_creat
, run prop_Instance_setIdx
, run prop_Instance_setName
, run prop_Instance_setAlias
, run prop_Instance_setPri
, run prop_Instance_setSec
, run prop_Instance_setBoth
, run prop_Instance_runStatus_True
, run prop_Instance_runStatus_False
, run prop_Instance_shrinkMG
, run prop_Instance_shrinkMF
, run prop_Instance_shrinkCG
, run prop_Instance_shrinkCF
, run prop_Instance_shrinkDG
, run prop_Instance_shrinkDF
, run prop_Instance_setMovable
-- Instance text loader tests

-- | Round-trip an instance line through the text loader and check the
-- parsed fields; a second line repeating the primary node as
-- secondary ('fail1') is expected to be rejected.
prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx autobal =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        -- same line, but with primary == secondary node
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx
        Instance.name i == name &&
        Instance.vcpus i == vcpus &&
        Instance.mem i == mem &&
        Instance.pNode i == pdx &&
        Instance.sNode i == (if null snode
                             then Node.noSecondary
        Instance.auto_balance i == autobal &&

-- | An instance line with a wrong field count yields the standard
-- error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Left msg -> isPrefixOf "Invalid/incomplete instance data: '" msg
  where nl = Data.Map.fromList ktn

-- | Round-trip a node line through the text loader, checking each
-- parsed field.
-- NOTE(review): negative input values appear to flip the check to the
-- offline branch -- confirm against Text.loadNode
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
            [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Just (name', node) ->
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
-- | Node lines with a wrong field count must fail to parse.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing (Text.loadNode Data.Map.empty fields)

-- | Serializing then re-loading a node is the identity.
prop_Text_NodeLSIdempotent node =
    roundTrip n == Just (Node.name n, n)
  where -- override failN1 to what loadNode returns by default
        n = node { Node.failN1 = True, Node.offline = False }
        roundTrip = Text.loadNode defGroupAssoc .
                    Utils.sepSplit '|' .
                    Text.serializeNode defGroupList
-- Batch runner entries for the text loader properties.
[ run prop_Text_Load_Instance
, run prop_Text_Load_InstanceFail
, run prop_Text_Load_Node
, run prop_Text_Load_NodeFail
, run prop_Text_NodeLSIdempotent
-- | setAlias changes only the alias, not the node name.
prop_Node_setAlias node name =
    Node.alias newnode == name &&
    Node.name newnode == Node.name node
  where _types = (node::Node.Node, name::String)
        newnode = Node.setAlias node name

-- | setOffline stores the given offline flag.
prop_Node_setOffline node status =
    status == Node.offline (Node.setOffline node status)

-- | setXmem stores the given value in the xMem field.
prop_Node_setXmem node xm =
    xm == Node.xMem (Node.setXmem node xm)

-- | setMcpu stores the given value in the mCpu field.
prop_Node_setMcpu node mc =
    mc == Node.mCpu (Node.setMcpu node mc)
-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailMem -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          -- shrink the instance to fit the node, then restore only the
          -- memory, so memory is the single oversized resource
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | As above, but with disk as the single oversized resource.
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailDisk -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | As above, but with the CPU count as the oversized resource.
prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailCPU -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }
-- | Adding a secondary instance that does not fit memory- or
-- disk-wise must be rejected.
prop_Node_addSec node inst pdx =
    too_big && not (Node.failN1 node) ==>
        isFailure (Node.addSec node inst pdx)
  where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
        too_big = Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
                  Instance.dsk inst >= Node.fDsk node
-- | Checks for memory reservation changes
prop_Node_rMem node inst =
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.auto_balance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.auto_balance = False }
        -- now we have the two instances, identical except the
        -- auto_balance attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             -- only the auto-balanced add reserves memory, and
             -- removal restores the original reservation
             Node.rMem a_ab > orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)

-- | A Double wrapped for a custom Arbitrary instance.
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    return $ SmallRatio v

-- | Check mdsk setting
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
  where _types = (node::Node.Node, mx::SmallRatio)
        node' = Node.setMdsk node mx'
-- | Adding then deleting the same tags leaves the map empty.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags Data.Map.empty tags) tags == Data.Map.empty
-- | Re-adding an already-present tag is rejected.
prop_Node_tagMaps_reject tags =
    any (\t -> Node.rejectAddTags m [t]) tags
  where m = Node.addTags Data.Map.empty tags
-- | Every default field has a known header and renders to a known
-- value for any node.
prop_Node_showField node =
    forAll (elements Node.defaultFields) $ \fld ->
    and [ fst (Node.showHeader fld) /= Types.unknownField
        , Node.showField node fld /= Types.unknownField
        ]

-- | computeGroups partitions nodes into per-group lists with unique
-- group UUIDs and no node lost.
prop_Node_computeGroups nodes =
    length nodes == sum (map (length . snd) ng) &&
    all sameGroup ng &&
    length (nub uuids) == length uuids &&
    (null nodes || not (null ng))
  where ng = Node.computeGroups nodes
        uuids = map fst ng
        sameGroup (guuid, ns) = all ((== guuid) . Node.group) ns
-- Batch runner entries for the Node properties.
[ run prop_Node_setAlias
, run prop_Node_setOffline
, run prop_Node_setMcpu
, run prop_Node_setXmem
, run prop_Node_addPriFM
, run prop_Node_addPriFD
, run prop_Node_addPriFC
, run prop_Node_addSec
, run prop_Node_setMdsk
, run prop_Node_tagMaps_idempotent
, run prop_Node_tagMaps_reject
, run prop_Node_showField
, run prop_Node_computeGroups

-- | Check that the cluster score is close to zero for a homogeneous cluster
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
-- | Check that cluster stats are sane: allocatable disk is
-- non-negative and never exceeds free disk.
prop_CStats_sane node count =
    and [ not (Node.offline node), not (Node.failN1 node), count > 0
        , Node.availDisk node > 0, Node.availMem node > 0 ] ==>
    let peers = Node.buildPeers node Container.empty
        assoc = zip [1..] $ replicate count peers::[(Types.Ndx, Node.Node)]
        stats = Cluster.totalResources (Container.fromList assoc)
    in Cluster.csAdsk stats >= 0 &&
       Cluster.csAdsk stats <= Cluster.csFdsk stats
-- | Check that one instance is allocated correctly, without
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && Node.availDisk node > 0
    && Node.availMem node > 0
    let nl = makeSmallCluster node count
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         case Cluster.asSolutions as of
           (xnl, xi, _, cv):[] ->
               -- after a sane allocation, rebalancing must not find
               -- any improving move
               let il' = Container.add (Instance.idx xi) xi il
                   tbl = Cluster.Table xnl il' cv []
               in not (canBalance tbl True True False)

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
    && not (Node.failN1 node)
    let nl = makeSmallCluster node count
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
       Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                               IntMap.size il' == length ixes &&
                                               length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    let nl = makeSmallCluster node count
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         case Cluster.asSolutions as of
           (xnl, xi, _, _):[] ->
               -- evacuate the freshly-allocated instance off its
               -- secondary node
               let sdx = Instance.sNode xi
                   il' = Container.add (Instance.idx xi) xi il
               in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && not (isNodeBig node 8)
    let nl = makeSmallCluster node count
        -- hold back one node, fill the rest, then re-add it empty
        (hnode, nl') = IntMap.deleteFindMax nl
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
       Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Ok (_, xnl, il', _, _) ->
             let ynl = Container.add (Node.idx hnode) hnode xnl
                 cv = Cluster.compCV ynl
                 tbl = Cluster.Table ynl il' cv []
             in canBalance tbl True True False
-- | Consistency check: only instances whose secondary lives in a
-- different group than the primary are reported as split.
prop_ClusterCheckConsistency node inst =
    let nl = makeSmallCluster node 3
        [n1, n2, n3] = Container.elems nl
        -- move the third node into another group
        n3' = n3 { Node.group = 1 }
        nl' = Container.add (Node.idx n3') n3' nl
        split = Cluster.findSplitInstances nl' . Container.fromList
        i_same = Instance.setBoth inst (Node.idx n1) (Node.idx n2)
        i_single = Instance.setBoth inst (Node.idx n1) Node.noSecondary
        i_split = Instance.setBoth inst (Node.idx n1) (Node.idx n3)
    in null (split [(0, i_same)]) &&
       null (split [(0, i_single)]) &&
       not (null (split [(0, i_split)]))
-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
    forAll (choose (0, 100)) $ \icnt ->
    let base = makeSmallCluster node 2
        place (ns, is) _ = assignInstance ns is inst 0 1
        (nl, il) = foldl place (base, Container.empty) [1..icnt]
        groups = Cluster.splitCluster nl il
        homogeneous (guuid, (gnl, _)) =
            all ((== guuid) . Node.group) (Container.elems gnl)
    in sum (map (Container.size . snd . snd) groups) == icnt &&
       all homogeneous groups
-- Batch runner entries for the Cluster properties.
[ run prop_Score_Zero
, run prop_CStats_sane
, run prop_ClusterAlloc_sane
, run prop_ClusterCanTieredAlloc
, run prop_ClusterAllocEvac
, run prop_ClusterAllocBalance
, run prop_ClusterCheckConsistency
, run prop_ClusterSplitCluster

-- | Check that opcode serialization is idempotent
prop_OpCodes_serialization op =
    case J.readJSON (J.showJSON op) of
      J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

-- Batch runner entry for the opcode properties.
[ run prop_OpCodes_serialization

-- | Check that (queued) job\/opcode status serialization is idempotent
prop_OpStatus_serialization os =
    case J.readJSON (J.showJSON os) of
      J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

-- | Same round-trip check for the job status type.
prop_JobStatus_serialization js =
    case J.readJSON (J.showJSON js) of
      J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

-- Batch runner entries for the job/opcode status properties.
[ run prop_OpStatus_serialization
, run prop_JobStatus_serialization
-- | lookupNode behaves like a plain map lookup on the association map.
prop_Loader_lookupNode ktn inst node =
    Data.Map.lookup node nl == Loader.lookupNode nl inst node
  where nl = Data.Map.fromList ktn

-- | lookupInstance behaves like a plain map lookup.
prop_Loader_lookupInstance kti inst =
    Data.Map.lookup inst il == Loader.lookupInstance il inst
  where il = Data.Map.fromList kti
-- | assignIndices keeps every node, and (per the visible check) the
-- assigned indices reach exactly length - 1.
prop_Loader_assignIndices nodes =
    Data.Map.size nassoc == length nodes &&
    Container.size kt == length nodes &&
    then maximum (IntMap.keys kt) == length nodes - 1
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
prop_Loader_mergeData ns =
    let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
    in case Loader.mergeData [] [] [] []
            (Loader.emptyCluster {Loader.cdNodes = na}) of
         Types.Ok (Loader.ClusterData _ nl il _) ->
             let nodes = Container.elems nl
                 instances = Container.elems il
             in (sum . map (length . Node.pList)) nodes == 0 &&

-- Batch runner entries for the Loader properties.
[ run prop_Loader_lookupNode
, run prop_Loader_lookupInstance
, run prop_Loader_assignIndices
, run prop_Loader_mergeData