{-| Unittests for ganeti-htools

Copyright (C) 2009, 2010, 2011 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.QC where

import Test.QuickCheck
import Test.QuickCheck.Batch
import Data.List (findIndex, intercalate, nub)
import Data.Maybe (isJust, isNothing)
import Control.Monad

import qualified Text.JSON as J
import qualified Data.Map
import qualified Data.IntMap as IntMap
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Jobs as Jobs
import qualified Ganeti.Luxi
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.PeerMap as PeerMap
import qualified Ganeti.HTools.Rapi
import qualified Ganeti.HTools.Simu
import qualified Ganeti.HTools.Text as Text
import qualified Ganeti.HTools.Types as Types
import qualified Ganeti.HTools.Utils as Utils
import qualified Ganeti.HTools.Version

-- | Maximum memory (1TiB, somewhat random value).
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024

defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
             Group.create "default" Utils.defaultGroupID

defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- | Simple checker for whether OpResult is fail or pass.
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec.
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] (-1) (-1)

-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
    in nlst
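
-- For example, makeSmallCluster node 3 yields a container of three
-- identical nodes with indices 0, 1 and 2 (Loader.assignIndices
-- numbers nodes from zero; cf. the Loader tests below).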

-- | Checks if a node is "big" enough.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu
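
-- | Checks whether Cluster.tryBalance can find at least one improving
-- move for the given table.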
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac = isJust $ Cluster.tryBalance tbl dm evac 0 0

-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
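
-- A minimal usage sketch (a hypothetical helper, not used by the test
-- suite itself): place one instance on a fresh two-node cluster, with
-- node 0 as primary and node 1 as secondary.
exampleAssign :: Node.Node -> Instance.Instance -> (Node.List, Instance.List)
exampleAssign node inst =
  let nl = makeSmallCluster node 2
  in assignInstance nl Container.empty inst 0 1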

-- * Arbitrary instances

-- Copied from the QuickCheck introduction.
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

newtype DNSChar = DNSChar { dnsGetChar :: Char }
instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)

getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)

getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')
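
-- An illustrative property over the generators themselves (a sketch,
-- not part of the original suite): getFQDN yields a non-empty name
-- with at least two dot-separated components.
prop_getFQDN_wellFormed :: Property
prop_getFQDN_wellFormed =
  forAll getFQDN $ \fqdn ->
    not (null fqdn) && length (Utils.sepSplit '.' fqdn) >= 2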

-- Let's generate a random instance.
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements ["ERROR_up", "ERROR_down", "ADMIN_down"
                         , "ERROR_nodedown", "ERROR_nodeoffline"
                         , "running"
                         , "no_such_status1", "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] pn sn

instance Arbitrary Node.Node where
    arbitrary = do
      name <- getFQDN
      mem_t <- choose (0, maxMem)
      mem_f <- choose (0, mem_t)
      mem_n <- choose (0, mem_t - mem_f)
      dsk_t <- choose (0, maxDsk)
      dsk_f <- choose (0, dsk_t)
      cpu_t <- choose (0, maxCpu)
      offl <- arbitrary
      let n = Node.create name (fromIntegral mem_t) mem_n mem_f
              (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
              (Group.idx defGroup)
          n' = Node.buildPeers n Container.empty
      return n'

instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
       "OP_TEST_DELAY" ->
         liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
       "OP_INSTANCE_REPLACE_DISKS" ->
         liftM5 OpCodes.OpReplaceDisks arbitrary arbitrary
                arbitrary arbitrary arbitrary
       "OP_INSTANCE_FAILOVER" ->
         liftM2 OpCodes.OpFailoverInstance arbitrary arbitrary
       "OP_INSTANCE_MIGRATE" ->
         liftM4 OpCodes.OpMigrateInstance arbitrary arbitrary arbitrary
                arbitrary
       _ -> fail "Wrong opcode")

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit lst = lst /= [""] &&
                                all (not . elem ',') lst ==>
                                Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- Split and join should always be idempotent.
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s
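
-- As a concrete example of the two round-trips:
-- Utils.commaJoin ["a", "b"] == "a,b" and
-- Utils.sepSplit ',' "a,b" == ["a", "b"].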

testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  ]

-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem indeed returns the maximum, or 0 for an
-- empty map.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap
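
-- For example, a PeerMap holding the pairs [(1, 5), (3, 2)] has
-- maxElem 5, while an empty map has maxElem 0.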

testPeerMap =
  [ run prop_PeerMap_addIdempotent
  , run prop_PeerMap_removeIdempotent
  , run prop_PeerMap_maxElem
  , run prop_PeerMap_addFind
  , run prop_PeerMap_findMissing
  ]

prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
    fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               (zip names nodes)
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

testContainer =
  [ run prop_Container_addTwo
  , run prop_Container_nameOf
  , run prop_Container_findByName
  ]

-- Simple instance tests; we only have setters/getters.

prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx

prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `elem` Instance.runningStates ==> run_st

prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailMem

prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailCPU

prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailDisk
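
-- Taken together, the shrink properties say that shrinkByType steps
-- the failing resource down by exactly one unit while at least two
-- units remain, and returns a failure (Types.isBad) once fewer than
-- two units are left.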

prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

testInstance =
  [ run prop_Instance_creat
  , run prop_Instance_setIdx
  , run prop_Instance_setName
  , run prop_Instance_setAlias
  , run prop_Instance_setPri
  , run prop_Instance_setSec
  , run prop_Instance_setBoth
  , run prop_Instance_runStatus_True
  , run prop_Instance_runStatus_False
  , run prop_Instance_shrinkMG
  , run prop_Instance_shrinkMF
  , run prop_Instance_shrinkCG
  , run prop_Instance_shrinkCF
  , run prop_Instance_shrinkDG
  , run prop_Instance_shrinkDF
  , run prop_Instance_setMovable
  ]

-- Instance text loader tests
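-- The loader consumes pipe-split field lists: an instance line
-- carries name|mem|dsk|vcpus|status|pnode|snode|tags, and a node line
-- name|tmem|nmem|fmem|tdsk|fdsk|tcpu|offline|group (cf. the field
-- lists passed to Text.loadInst and Text.loadNode below).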

prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, snode, tags]::
               Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, pnode, tags]::
               Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            (Instance.name i == name &&
             Instance.vcpus i == vcpus &&
             Instance.mem i == mem &&
             Instance.pNode i == pdx &&
             Instance.sNode i == (if null snode
                                  then Node.noSecondary
                                  else rsdx) &&
             isNothing fail1)

prop_Text_Load_InstanceFail ktn fields =
    length fields /= 8 ==> isNothing $ Text.loadInst nl fields
    where nl = Data.Map.fromList ktn

prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                 then "?"
                 else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
            [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc .
     Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }

testText =
  [ run prop_Text_Load_Instance
  , run prop_Text_Load_InstanceFail
  , run prop_Text_Load_Node
  , run prop_Text_Load_NodeFail
  , run prop_Text_NodeLSIdempotent
  ]

prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailCPU -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }

-- | Check that an instance add as secondary with too high memory or
-- disk will be rejected.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
    where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Check mdsk setting.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField

prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))

testNode =
  [ run prop_Node_setAlias
  , run prop_Node_setOffline
  , run prop_Node_setMcpu
  , run prop_Node_setXmem
  , run prop_Node_addPriFM
  , run prop_Node_addPriFD
  , run prop_Node_addPriFC
  , run prop_Node_addSec
  , run prop_Node_setMdsk
  , run prop_Node_tagMaps_idempotent
  , run prop_Node_tagMaps_reject
  , run prop_Node_showField
  , run prop_Node_computeGroups
  ]

-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-12

-- | Check that cluster stats are sane.
prop_CStats_sane node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && Node.availDisk node > 0
    && Node.availMem node > 0
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True False)
               _ -> False

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && isNodeBig node 4
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
       Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                               IntMap.size il' == length ixes &&
                                               length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && isNodeBig node 4
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, _):[] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && isNodeBig node 4
    && not (isNodeBig node 8)
    ==>
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
       Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
             let ynl = Container.add (Node.idx hnode) hnode xnl
                 cv = Cluster.compCV ynl
                 tbl = Cluster.Table ynl il' cv []
             in canBalance tbl True False

-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
          (Container.elems nl'')) gni

testCluster =
  [ run prop_Score_Zero
  , run prop_CStats_sane
  , run prop_ClusterAlloc_sane
  , run prop_ClusterCanTieredAlloc
  , run prop_ClusterAllocEvac
  , run prop_ClusterAllocBalance
  , run prop_ClusterCheckConsistency
  , run prop_ClusterSplitCluster
  ]

-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

testOpCodes =
  [ run prop_OpCodes_serialization
  ]

-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti

prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  (if not (null nodes)
   then maximum (IntMap.keys kt) == length nodes - 1
   else True)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] []
          (Loader.emptyCluster {Loader.cdNodes = na}) of
       Types.Bad _ -> False
       Types.Ok (Loader.ClusterData _ nl il _) ->
           let nodes = Container.elems nl
               instances = Container.elems il
           in (sum . map (length . Node.pList)) nodes == 0 &&
              null instances

testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]