1 {-| Unittests for ganeti-htools
7 Copyright (C) 2009, 2010, 2011 Google Inc.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 module Ganeti.HTools.QC
39 import Test.QuickCheck
40 import Test.QuickCheck.Batch
41 import Data.List (findIndex, intercalate, nub, isPrefixOf)
44 import qualified Text.JSON as J
45 import qualified Data.Map
46 import qualified Data.IntMap as IntMap
47 import qualified Ganeti.OpCodes as OpCodes
48 import qualified Ganeti.Jobs as Jobs
49 import qualified Ganeti.Luxi
50 import qualified Ganeti.HTools.CLI as CLI
51 import qualified Ganeti.HTools.Cluster as Cluster
52 import qualified Ganeti.HTools.Container as Container
53 import qualified Ganeti.HTools.ExtLoader
54 import qualified Ganeti.HTools.IAlloc as IAlloc
55 import qualified Ganeti.HTools.Instance as Instance
56 import qualified Ganeti.HTools.Loader as Loader
57 import qualified Ganeti.HTools.Luxi
58 import qualified Ganeti.HTools.Node as Node
59 import qualified Ganeti.HTools.Group as Group
60 import qualified Ganeti.HTools.PeerMap as PeerMap
61 import qualified Ganeti.HTools.Rapi
62 import qualified Ganeti.HTools.Simu
63 import qualified Ganeti.HTools.Text as Text
64 import qualified Ganeti.HTools.Types as Types
65 import qualified Ganeti.HTools.Utils as Utils
66 import qualified Ganeti.HTools.Version
-- | Maximum memory (1TiB, somewhat random value)

-- | Maximum disk (8TiB, somewhat random value)
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)

-- | The default node group used throughout the tests, forced to index 0.
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
             Group.create "default" Utils.defaultGroupID

-- | Singleton group list containing only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Association from 'defGroup's uuid to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- | Simple checker for whether OpResult is fail or pass
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True

-- | Update an instance to be smaller than a node: each resource is
-- set to half of what the node has available.
setInstanceSmallerThanNode node inst =
  inst { Instance.mem = Node.availMem node `div` 2
       , Instance.dsk = Node.availDisk node `div` 2
       , Instance.vcpus = Node.availCpu node `div` 2

-- | Create an instance given its spec; the instance is named
-- "inst-unnamed", marked running and not yet placed (node indices -1).
createInstance mem dsk vcpus =
  Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)

-- | Create a small cluster by repeating a node spec @count@ times;
-- indices are assigned via 'Loader.assignIndices'.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
  let fn = Node.buildPeers node Container.empty
      namelst = map (\n -> (Node.name n, n)) (replicate count fn)
      (_, nlst) = Loader.assignIndices namelst
-- | Checks if a node is "big" enough: each of the three resources
-- (disk, memory, cpu) must exceed @size@ allocation units.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]
-- | Whether a balancing step can still be made on the given table
-- (with the given disk-move and evacuation flags, thresholds zero).
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac =
    case Cluster.tryBalance tbl dm evac 0 0 of
      Nothing -> False
      Just _  -> True
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      -- the next free instance index: one past the current maximum
      maxiidx = if Container.null il
                  else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      -- record the instance on both its nodes
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
-- * Arbitrary instances

-- copied from the introduction to quickcheck
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

-- | A single character that is valid inside a DNS-like name.
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
        -- letters, digits, underscore and dash only
        x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")

-- | Generates a single DNS name component.
getName :: Gen String
    dn <- vector n::Gen [DNSChar]
    return (map dnsGetChar dn)

-- | Generates a dotted name: one component plus 1-4 further
-- dot-separated components.
getFQDN :: Gen String
    ncomps <- choose (1, 4)
    frest <- vector ncomps::Gen [[DNSChar]]
    let frest' = map (map dnsGetChar) frest
    return (felem ++ "." ++ intercalate "." frest')

-- let's generate a random instance
instance Arbitrary Instance.Instance where
    mem <- choose (0, maxMem)
    dsk <- choose (0, maxDsk)
    -- the run states mix valid and deliberately-unknown values
    run_st <- elements ["ERROR_up", "ERROR_down", "ADMIN_down"
                       , "ERROR_nodedown", "ERROR_nodeoffline"
                       , "no_such_status1", "no_such_status2"]
    vcpus <- choose (0, maxCpu)
    return $ Instance.create name mem dsk vcpus run_st [] True pn sn

-- | A random node; free amounts never exceed the totals they belong to.
instance Arbitrary Node.Node where
    mem_t <- choose (0, maxMem)
    mem_f <- choose (0, mem_t)
    mem_n <- choose (0, mem_t - mem_f)
    dsk_t <- choose (0, maxDsk)
    dsk_f <- choose (0, dsk_t)
    cpu_t <- choose (0, maxCpu)
    let n = Node.create name (fromIntegral mem_t) mem_n mem_f
            (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
        n' = Node.buildPeers n Container.empty

instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto

instance Arbitrary OpCodes.OpCode where
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
        -- each opcode's fields are filled with arbitrary values
        liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
      "OP_INSTANCE_REPLACE_DISKS" ->
        liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
      "OP_INSTANCE_FAILOVER" ->
        liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
      "OP_INSTANCE_MIGRATE" ->
        liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
      _ -> fail "Wrong opcode")
-- | Any constructor of the bounded, enumerable OpStatus type.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements $ enumFromTo minBound maxBound

-- | Any constructor of the bounded, enumerable JobStatus type.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements $ enumFromTo minBound maxBound
-- If the list is not just an empty element, and if the elements do
-- not contain commas, then splitting a joined list must give back
-- the original list (join+split is idempotent)
prop_Utils_commaJoinSplit lst =
    lst /= [""] && all (notElem ',') lst ==>
        Utils.sepSplit ',' (Utils.commaJoin lst) == lst
-- Joining the result of a split must reproduce the original string
-- (split+join is always idempotent)
prop_Utils_commaSplitJoin s =
    s == Utils.commaJoin (Utils.sepSplit ',' s)
-- | fromObjWithDefault, tested via the Maybe monad and Integer values
prop_Utils_fromObjWithDefault def_value random_key =
    missing && present
    where _types = (def_value :: Integer)
          -- a missing key must yield the supplied default
          missing = Utils.fromObjWithDefault [] random_key def_value ==
                    Just def_value
          -- an existing key must be returned as-is, ignoring the
          -- (deliberately different) default
          present = Utils.fromObjWithDefault
                      [(random_key, J.showJSON def_value)]
                      random_key (def_value + 1) == Just def_value
  [ run prop_Utils_commaJoinSplit     -- join-then-split round-trip
  , run prop_Utils_commaSplitJoin     -- split-then-join round-trip
  , run prop_Utils_fromObjWithDefault -- default-value lookups
-- | Make sure add is idempotent: adding the same key/value a second
-- time leaves the map unchanged.
prop_PeerMap_addIdempotent pmap key em =
    added == PeerMap.add key em added
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          base = PeerMap.accumArray const pmap
          added = PeerMap.add key em base
-- | Make sure remove is idempotent: removing a key a second time
-- leaves the map unchanged.
prop_PeerMap_removeIdempotent pmap key =
    removed == PeerMap.remove key removed
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          base = PeerMap.accumArray const pmap
          removed = PeerMap.remove key base
-- | Make sure looking up a key that was just removed returns 0.
prop_PeerMap_findMissing pmap key =
    0 == PeerMap.find key withoutKey
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          withoutKey = PeerMap.remove key (PeerMap.accumArray const pmap)
-- | Make sure an item that was just added can be found again.
prop_PeerMap_addFind pmap key em =
    em == PeerMap.find key withKey
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          withKey = PeerMap.add key em (PeerMap.accumArray const pmap)
-- | Manual check that maxElem returns the maximum indeed, or 0 for null
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == expected
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap
          expected = case puniq of
                       [] -> 0
                       _  -> maximum (map snd puniq)
  [ run prop_PeerMap_addIdempotent
  , run prop_PeerMap_removeIdempotent
  , run prop_PeerMap_maxElem
  , run prop_PeerMap_addFind
  , run prop_PeerMap_findMissing

-- | addTwo must be order-independent in its two pairs and idempotent.
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
    fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
          -- a container keyed (and valued) by the list elements
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2
-- | nameOf must return the name of the node stored under an index.
prop_Container_nameOf node =
    Container.nameOf nl (Node.idx firstNode) == Node.name firstNode
    where nl = makeSmallCluster node 1
          firstNode = head (Container.elems nl)
-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- re-key the generated nodes with the generated (name, alias) pairs
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

  [ run prop_Container_addTwo
  , run prop_Container_nameOf
  , run prop_Container_findByName
-- Simple instance tests, we only have setter/getters

-- | On a freshly created instance, the alias equals the name.
prop_Instance_creat inst =
    Instance.alias inst == Instance.name inst
-- | setIdx stores the given index.
prop_Instance_setIdx inst idx =
    idx == Instance.idx updated
    where _types = (inst::Instance.Instance, idx::Types.Idx)
          updated = Instance.setIdx inst idx
-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    all (== name) [Instance.name renamed, Instance.alias renamed]
    where _types = (inst::Instance.Instance, name::String)
          renamed = Instance.setName inst name
-- | setAlias changes only the alias, leaving the name untouched.
prop_Instance_setAlias inst name =
    Instance.alias aliased == name &&
    Instance.name aliased == Instance.name inst
    where _types = (inst::Instance.Instance, name::String)
          aliased = Instance.setAlias inst name
-- | setPri stores the given primary node index.
prop_Instance_setPri inst pdx =
    pdx == Instance.pNode (Instance.setPri inst pdx)
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)
-- | setSec stores the given secondary node index.
prop_Instance_setSec inst sdx =
    sdx == Instance.sNode (Instance.setSec inst sdx)
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)
-- | setBoth updates the primary and secondary node indices at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode updated == pdx && Instance.sNode updated == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          updated = Instance.setBoth inst pdx sdx
-- | A textual run state listed in runningStates must imply running == True.
prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    run_tx `elem` Instance.runningStates ==> run_st

-- | A textual run state not listed in runningStates must imply
-- running == False.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    run_tx `notElem` Instance.runningStates ==> not run_st

-- | Shrinking memory on an instance with at least two memory units
-- must succeed and remove exactly one unit.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
            Instance.mem inst' == Instance.mem inst - Types.unitMem
-- | Shrinking memory on an instance with less than two memory units
-- must fail.
prop_Instance_shrinkMF inst =
    tooSmall ==> Types.isBad (Instance.shrinkByType inst Types.FailMem)
    where tooSmall = Instance.mem inst < 2 * Types.unitMem
-- | Shrinking vcpus on an instance with at least two cpu units must
-- succeed and remove exactly one unit.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
            Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
-- | Shrinking vcpus on an instance with less than two cpu units must
-- fail.
prop_Instance_shrinkCF inst =
    tooSmall ==> Types.isBad (Instance.shrinkByType inst Types.FailCPU)
    where tooSmall = Instance.vcpus inst < 2 * Types.unitCpu
-- | Shrinking disk on an instance with at least two disk units must
-- succeed and remove exactly one unit.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
            Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
-- | Shrinking disk on an instance with less than two disk units must
-- fail.
prop_Instance_shrinkDF inst =
    tooSmall ==> Types.isBad (Instance.shrinkByType inst Types.FailDisk)
    where tooSmall = Instance.dsk inst < 2 * Types.unitDsk
-- | setMovable stores the given movability flag.
prop_Instance_setMovable inst m =
    m == Instance.movable (Instance.setMovable inst m)
  [ run prop_Instance_creat
  , run prop_Instance_setIdx
  , run prop_Instance_setName
  , run prop_Instance_setAlias
  , run prop_Instance_setPri
  , run prop_Instance_setSec
  , run prop_Instance_setBoth
  , run prop_Instance_runStatus_True
  , run prop_Instance_runStatus_False
  , run prop_Instance_shrinkMG
  , run prop_Instance_shrinkMF
  , run prop_Instance_shrinkCG
  , run prop_Instance_shrinkCF
  , run prop_Instance_shrinkDG
  , run prop_Instance_shrinkDF
  , run prop_Instance_setMovable

-- Instance text loader tests

-- | Loading a serialized instance line must give back the original
-- spec; @fail1@ feeds the primary node as secondary as well, which
-- is checked separately.
prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx autobal =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
             else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        -- the auto-balance flag is serialized as "Y"/"N"
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx
    (Instance.name i == name &&
     Instance.vcpus i == vcpus &&
     Instance.mem i == mem &&
     Instance.pNode i == pdx &&
     Instance.sNode i == (if null snode
                          then Node.noSecondary
    Instance.auto_balance i == autobal &&

-- | Loading an instance line with the wrong field count must fail
-- with the standard error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Left msg -> isPrefixOf "Invalid/incomplete instance data: '" msg
    where nl = Data.Map.fromList ktn

-- | Loading a node from its serialized fields: all numeric fields
-- must round-trip; NOTE(review): a negative input value appears to
-- result in an offline node — confirm against Text.loadNode.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
            [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Just (name', node) ->
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
-- | Loading a node line with the wrong field count must fail.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==>
        case Text.loadNode Data.Map.empty fields of
          Nothing -> True
          Just _  -> False
-- | Serializing a node and loading it back must reproduce the node.
prop_Text_NodeLSIdempotent node =
    roundTripped == Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }
          serialized = Text.serializeNode defGroupList n
          roundTripped =
              Text.loadNode defGroupAssoc (Utils.sepSplit '|' serialized)
  [ run prop_Text_Load_Instance
  , run prop_Text_Load_InstanceFail
  , run prop_Text_Load_Node
  , run prop_Text_Load_NodeFail
  , run prop_Text_NodeLSIdempotent
-- | setAlias changes only the alias, keeping the node name.
prop_Node_setAlias node name =
    Node.alias aliased == name &&
    Node.name aliased == Node.name node
    where _types = (node::Node.Node, name::String)
          aliased = Node.setAlias node name
-- | setOffline stores the given offline flag.
prop_Node_setOffline node status =
    status == Node.offline (Node.setOffline node status)
-- | setXmem stores the given external-memory value.
prop_Node_setXmem node xm =
    xm == Node.xMem (Node.setXmem node xm)
-- | setMcpu stores the given max-cpu ratio.
prop_Node_setMcpu node mc =
    mc == Node.mCpu (Node.setMcpu node mc)
-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailMem -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          -- shrink everything except memory, which keeps the
          -- original (too large) value
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | An instance with too much disk must be rejected on addPri.
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailDisk -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | An instance with too many vcpus must be rejected on addPri.
prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailCPU -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }
-- | Adding as secondary an instance with too much memory (relative to
-- free minus reserved) or too much disk must fail.
prop_Node_addSec node inst pdx =
    (memTooBig || dskTooBig) && not (Node.failN1 node)
        ==> isFailure (Node.addSec node inst pdx)
    where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
          memTooBig = Instance.mem inst >= Node.fMem node - Node.rMem node
          dskTooBig = Instance.dsk inst >= Node.fDsk node
-- | A small Double ratio, used for minimum-disk settings.
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    return $ SmallRatio v

-- | Check mdsk setting: the low-disk limit and available disk stay
-- consistent with the node totals after setMdsk.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
-- | Adding a set of tags and deleting them again yields the empty map.
prop_Node_tagMaps_idempotent tags =
    Node.delTags withTags tags == emptyMap
    where emptyMap = Data.Map.empty
          withTags = Node.addTags emptyMap tags
-- | Every tag already present in the map must be rejected when
-- re-added individually.
prop_Node_tagMaps_reject tags =
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags
-- | Every default field must have a known header and a known value.
prop_Node_showField node =
    forAll (elements Node.defaultFields) $ \field ->
        let header = fst (Node.showHeader field)
            value  = Node.showField node field
        in header /= Types.unknownField && value /= Types.unknownField
-- | Check that computed groups are consistent: the group sizes add up
-- to the node count, every node sits in the group keyed by its group
-- id, group uuids are unique, and a non-empty node list yields at
-- least one group.
prop_Node_computeGroups nodes =
    let ng = Node.computeGroups nodes
        onlyuuid = map fst ng
    in length nodes == sum (map (length . snd) ng) &&
       all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
       length (nub onlyuuid) == length onlyuuid &&
       -- idiom fix: `if null nodes then True else not (null ng)` is
       -- just a plain implication
       (null nodes || not (null ng))
  [ run prop_Node_setAlias
  , run prop_Node_setOffline
  , run prop_Node_setMcpu
  , run prop_Node_setXmem
  , run prop_Node_addPriFM
  , run prop_Node_addPriFD
  , run prop_Node_addPriFC
  , run prop_Node_addSec
  , run prop_Node_setMdsk
  , run prop_Node_tagMaps_idempotent
  , run prop_Node_tagMaps_reject
  , run prop_Node_showField
  , run prop_Node_computeGroups

-- | Check that the cluster score is close to zero for a homogeneous cluster
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
-- | Check that cluster stats are sane: allocatable disk is
-- non-negative and never exceeds free disk.
prop_CStats_sane node count =
    precondition ==>
        Cluster.csAdsk cstats >= 0 &&
        Cluster.csAdsk cstats <= Cluster.csFdsk cstats
    where precondition = not (Node.offline node) &&
                         not (Node.failN1 node) &&
                         count > 0 &&
                         Node.availDisk node > 0 &&
                         Node.availMem node > 0
          peered = Node.buildPeers node Container.empty
          nlst = zip [1..] (replicate count peered)::[(Types.Ndx, Node.Node)]
          cstats = Cluster.totalResources (Container.fromList nlst)
-- | Check that one instance is allocated correctly, without
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && Node.availDisk node > 0
    && Node.availMem node > 0
    let nl = makeSmallCluster node count
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         case Cluster.asSolutions as of
           -- exactly one solution, and the resulting cluster must
           -- already be balanced
           (xnl, xi, _, cv):[] ->
               let il' = Container.add (Instance.idx xi) xi il
                   tbl = Cluster.Table xnl il' cv []
               in not (canBalance tbl True False)

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
    && not (Node.failN1 node)
    let nl = makeSmallCluster node count
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
       Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         -- each allocated index must have matching stats entries
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    let nl = makeSmallCluster node count
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         case Cluster.asSolutions as of
           (xnl, xi, _, _):[] ->
               let sdx = Instance.sNode xi
                   il' = Container.add (Instance.idx xi) xi il
               in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && not (isNodeBig node 8)
    let nl = makeSmallCluster node count
        -- hold one node back, fill the rest, then re-add it
        (hnode, nl') = IntMap.deleteFindMax nl
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
       Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Ok (_, xnl, il', _, _) ->
             let ynl = Container.add (Node.idx hnode) hnode xnl
                 cv = Cluster.compCV ynl
                 tbl = Cluster.Table ynl il' cv []
             in canBalance tbl True False
-- | Checks consistency: instances kept inside one node group must not
-- be flagged as split, while an instance spanning two groups must be.
prop_ClusterCheckConsistency node inst =
    and [ null (splitCheck [(0, sameGroup)])
        , null (splitCheck [(0, primOnly)])
        , not (null (splitCheck [(0, crossGroup)]))
        ]
    where nl = makeSmallCluster node 3
          [node1, node2, node3] = Container.elems nl
          -- move the third node into a different group
          node3' = node3 { Node.group = 1 }
          nl' = Container.add (Node.idx node3') node3' nl
          sameGroup  = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
          primOnly   = Instance.setBoth inst (Node.idx node1) Node.noSecondary
          crossGroup = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
          splitCheck = Cluster.findSplitInstances nl' . Container.fromList
-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
    forAll (choose (0, 100)) $ \icnt ->
        let base = makeSmallCluster node 2
            place (ns, is) _ = assignInstance ns is inst 0 1
            (nl', il') = foldl place (base, Container.empty) [1..icnt]
            gni = Cluster.splitCluster nl' il'
            -- total instances across all split groups
            instTotal = sum [Container.size il'' | (_, (_, il'')) <- gni]
            -- every node of a split group must belong to that group
            sameGroup (guuid, (nl'', _)) =
                all ((== guuid) . Node.group) (Container.elems nl'')
        in instTotal == icnt && all sameGroup gni
  [ run prop_Score_Zero
  , run prop_CStats_sane
  , run prop_ClusterAlloc_sane
  , run prop_ClusterCanTieredAlloc
  , run prop_ClusterAllocEvac
  , run prop_ClusterAllocBalance
  , run prop_ClusterCheckConsistency
  , run prop_ClusterSplitCluster

-- | Check that opcode serialization is idempotent
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

  [ run prop_OpCodes_serialization

-- | Check that (queued) job\/opcode status serialization is idempotent
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

-- | Same round-trip check for the job status type.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
-- | lookupNode must behave like a plain map lookup on the node name.
prop_Loader_lookupNode ktn inst node =
    let nodeMap = Data.Map.fromList ktn
    in Loader.lookupNode nodeMap inst node == Data.Map.lookup node nodeMap
-- | lookupInstance must behave like a plain map lookup on the name.
prop_Loader_lookupInstance kti inst =
    let instMap = Data.Map.fromList kti
    in Loader.lookupInstance instMap inst == Data.Map.lookup inst instMap
-- | assignIndices must keep all nodes and hand out consecutive
-- indices starting from 0.
prop_Loader_assignIndices nodes =
    Data.Map.size nassoc == length nodes &&
    Container.size kt == length nodes &&
    then maximum (IntMap.keys kt) == length nodes - 1
    where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
prop_Loader_mergeData ns =
    let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
    in case Loader.mergeData [] [] []
            (Loader.emptyCluster {Loader.cdNodes = na}) of
         Types.Ok (Loader.ClusterData _ nl il _) ->
             -- with no instances merged in, no node may list primaries
             let nodes = Container.elems nl
                 instances = Container.elems il
             in (sum . map (length . Node.pList)) nodes == 0 &&

  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData