1 {-| Unittests for ganeti-htools
7 Copyright (C) 2009, 2010 Google Inc.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 module Ganeti.HTools.QC
39 import Test.QuickCheck
40 import Test.QuickCheck.Batch
41 import Data.List (findIndex, intercalate, nub)
44 import qualified Text.JSON as J
45 import qualified Data.Map
46 import qualified Data.IntMap as IntMap
47 import qualified Ganeti.OpCodes as OpCodes
48 import qualified Ganeti.Jobs as Jobs
49 import qualified Ganeti.Luxi
50 import qualified Ganeti.HTools.CLI as CLI
51 import qualified Ganeti.HTools.Cluster as Cluster
52 import qualified Ganeti.HTools.Container as Container
53 import qualified Ganeti.HTools.ExtLoader
54 import qualified Ganeti.HTools.IAlloc as IAlloc
55 import qualified Ganeti.HTools.Instance as Instance
56 import qualified Ganeti.HTools.Loader as Loader
57 import qualified Ganeti.HTools.Luxi
58 import qualified Ganeti.HTools.Node as Node
59 import qualified Ganeti.HTools.Group as Group
60 import qualified Ganeti.HTools.PeerMap as PeerMap
61 import qualified Ganeti.HTools.Rapi
62 import qualified Ganeti.HTools.Simu
63 import qualified Ganeti.HTools.Text as Text
64 import qualified Ganeti.HTools.Types as Types
65 import qualified Ganeti.HTools.Utils as Utils
66 import qualified Ganeti.HTools.Version
-- | Maximum memory (1TiB, somewhat random value)

-- | Maximum disk (8TiB, somewhat random value)
-- NOTE(review): presumably expressed in MiB (8 * 1024 * 1024 MiB = 8TiB)
-- — confirm the unit against Node/Instance fields.
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)

-- | The group used throughout the tests, forced to index 0.
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
             Group.create "default" Utils.defaultGroupID

-- | A group container holding only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromAssocList [(Group.idx defGroup, defGroup)]

-- | Lookup table from group UUID to group index, for 'defGroup' only.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- | Simple checker for whether OpResult is fail or pass
-- NOTE(review): a catch-all clause returning False presumably follows;
-- it is not visible in this view.
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
-- | Update an instance to be smaller than a node
-- Halves the node's available memory/disk/cpu into the instance, so the
-- result is guaranteed to fit on that node.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         -- NOTE(review): the closing brace of this record update is not
         -- visible in this view.

-- | Create an instance given its spec
-- The instance is unnamed, marked running, untagged and unplaced (-1 nodes).
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] (-1) (-1)

-- | Create a small cluster by repeating a node spec
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
        -- NOTE(review): the final 'in' expression (presumably returning
        -- nlst) is not visible in this view.
-- | Checks if a node is "big" enough: strictly more than @size@
-- allocation units of disk, memory and cpu available.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]
-- | Whether the balancing algorithm can find at least one move
-- (thresholds set to zero).
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac =
    case Cluster.tryBalance tbl dm evac 0 0 of
      Just _  -> True
      Nothing -> False
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done
-- NOTE(review): the 'then' branch of maxiidx and the final 'in'
-- expression are not visible in this view.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
    let pnode = Container.find pdx nl
        snode = Container.find sdx nl
        -- next free instance index: one past the current maximum
        maxiidx = if Container.null il
                    else fst (Container.findMax il) + 1
        inst' = inst { Instance.idx = maxiidx,
                       Instance.pNode = pdx, Instance.sNode = sdx }
        pnode' = Node.setPri pnode inst'
        snode' = Node.setSec snode inst'
        nl' = Container.addTwo pdx pnode' sdx snode' nl
        il' = Container.add maxiidx inst' il
-- * Arbitrary instances

-- copied from the introduction to quickcheck
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

-- | A character that is valid inside a DNS name component.
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
      -- NOTE(review): the 'arbitrary = do' header and the final wrap into
      -- DNSChar are not visible in this view.
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")

-- | Generator for one DNS name component.
getName :: Gen String
      -- NOTE(review): the do-header and length choice are not visible here.
      dn <- vector n::Gen [DNSChar]
      return (map dnsGetChar dn)

-- | Generator for a dotted, fully-qualified name (first component plus
-- 1-4 further components).
getFQDN :: Gen String
      -- NOTE(review): the do-header and first-component binding are not
      -- visible here.
      ncomps <- choose (1, 4)
      frest <- vector ncomps::Gen [[DNSChar]]
      let frest' = map (map dnsGetChar) frest
      return (felem ++ "." ++ intercalate "." frest')

-- let's generate a random instance
-- NOTE(review): the generator header and the name/pn/sn bindings are not
-- visible in this view.
instance Arbitrary Instance.Instance where
        mem <- choose (0, maxMem)
        dsk <- choose (0, maxDsk)
        run_st <- elements ["ERROR_up", "ERROR_down", "ADMIN_down"
                           , "ERROR_nodedown", "ERROR_nodeoffline"
                           , "no_such_status1", "no_such_status2"]
        vcpus <- choose (0, maxCpu)
        return $ Instance.create name mem dsk vcpus run_st [] pn sn
-- and a random node
-- NOTE(review): the generator header (name, offline flag) and the final
-- 'return' are not visible in this view.
instance Arbitrary Node.Node where
        mem_t <- choose (0, maxMem)
        mem_f <- choose (0, mem_t)
        mem_n <- choose (0, mem_t - mem_f)
        dsk_t <- choose (0, maxDsk)
        dsk_f <- choose (0, dsk_t)
        cpu_t <- choose (0, maxCpu)
        let n = Node.create name (fromIntegral mem_t) mem_n mem_f
                (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
            n' = Node.buildPeers n Container.empty

-- | Replace-disks mode drawn uniformly from the variants.
-- NOTE(review): the closing bracket of the list is not visible here.
instance Arbitrary OpCodes.ReplaceDisksMode where
    arbitrary = elements [ OpCodes.ReplaceOnPrimary
                         , OpCodes.ReplaceOnSecondary
                         , OpCodes.ReplaceNewSecondary
                         , OpCodes.ReplaceAuto

-- | Random opcodes: pick an opcode id, then build matching arguments
-- via the liftM* combinators.
-- NOTE(review): the do-header and the dispatch 'case op_id of' line are
-- not visible in this view.
instance Arbitrary OpCodes.OpCode where
      op_id <- elements [ "OP_TEST_DELAY"
                        , "OP_INSTANCE_REPLACE_DISKS"
                        , "OP_INSTANCE_FAILOVER"
                        , "OP_INSTANCE_MIGRATE"
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpFailoverInstance arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM3 OpCodes.OpMigrateInstance arbitrary arbitrary arbitrary
        _ -> fail "Wrong opcode")
-- | Job/opcode statuses are drawn uniformly over the whole enumeration.
instance Arbitrary Jobs.OpStatus where
    arbitrary = elements (enumFromTo minBound maxBound)

instance Arbitrary Jobs.JobStatus where
    arbitrary = elements (enumFromTo minBound maxBound)
-- If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent
prop_Utils_commaJoinSplit lst =
    lst /= [""] && all (notElem ',') lst ==>
        Utils.sepSplit ',' (Utils.commaJoin lst) == lst
-- Split and join should always be idempotent
prop_Utils_commaSplitJoin str =
    Utils.commaJoin (Utils.sepSplit ',' str) == str
-- Batch entries for the Utils tests.
    [ run prop_Utils_commaJoinSplit
    , run prop_Utils_commaSplitJoin
-- | Make sure add is idempotent
prop_PeerMap_addIdempotent pmap key em =
    addedOnce == addedTwice
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap
          addedOnce = PeerMap.add key em puniq
          addedTwice = PeerMap.add key em addedOnce
-- | Make sure remove is idempotent
prop_PeerMap_removeIdempotent pmap key =
    removedOnce == removedTwice
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap
          removedOnce = PeerMap.remove key puniq
          removedTwice = PeerMap.remove key removedOnce
-- | Make sure a missing item returns 0
prop_PeerMap_findMissing pmap key =
    PeerMap.find key cleaned == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          cleaned = PeerMap.remove key (PeerMap.accumArray const pmap)
-- | Make sure an added item is found
prop_PeerMap_addFind pmap key em =
    PeerMap.find key withItem == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          withItem = PeerMap.add key em (PeerMap.accumArray const pmap)
-- | Manual check that maxElem returns the maximum indeed, or 0 for null
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == expected
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap
          expected = case puniq of
                       [] -> 0
                       _  -> maximum (map snd puniq)
-- Batch entries for the PeerMap tests.
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
-- | 'Container.addTwo' is symmetric in the order of its two insertions
-- and idempotent when repeated.
-- NOTE(review): part of the _types tuple is not visible in this view.
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
    fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2
-- | 'Container.nameOf' must agree with the node's own name.
prop_Container_nameOf node =
    Container.nameOf nl (Node.idx fnode) == Node.name fnode
    where nl = makeSmallCluster node 1
          fnode = head (Container.elems nl)
-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
-- NOTE(review): the uniqueness count being compared and the zip of
-- (name, alias) pairs with the node list are on lines not visible in
-- this view.
prop_Container_findByName node othername =
    forAll (choose (1, 20)) $ \ cnt ->
    forAll (choose (0, cnt - 1)) $ \ fidx ->
    forAll (vector cnt) $ \ names ->
    (length . nub) (map fst names ++ map snd names) ==
    not (othername `elem` (map fst names ++ map snd names)) ==>
    let nl = makeSmallCluster node cnt
        nodes = Container.elems nl
        nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                               nn { Node.name = name,
                                                    Node.alias = alias }))
        nl' = Container.fromAssocList nodes'
        target = snd (nodes' !! fidx)
    in Container.findByName nl' (Node.name target) == Just target &&
       Container.findByName nl' (Node.alias target) == Just target &&
       Container.findByName nl' othername == Nothing

-- Batch entries for the Container tests.
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
-- Simple instance tests, we only have setter/getters

-- | A freshly-created instance aliases to its own name.
prop_Instance_creat inst =
    Instance.alias inst == Instance.name inst
-- | setIdx stores the index we gave it.
prop_Instance_setIdx inst idx =
    idx == Instance.idx (Instance.setIdx inst idx)
    where _types = (inst::Instance.Instance, idx::Types.Idx)
-- | setName updates both name and alias.
prop_Instance_setName inst name =
    Instance.name renamed == name && Instance.alias renamed == name
    where _types = (inst::Instance.Instance, name::String)
          renamed = Instance.setName inst name
-- | setAlias updates the alias but leaves the name untouched.
prop_Instance_setAlias inst name =
    Instance.alias aliased == name &&
    Instance.name aliased == Instance.name inst
    where _types = (inst::Instance.Instance, name::String)
          aliased = Instance.setAlias inst name
-- | setPri stores the primary node index.
prop_Instance_setPri inst pdx =
    pdx == Instance.pNode (Instance.setPri inst pdx)
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)
-- | setSec stores the secondary node index.
prop_Instance_setSec inst sdx =
    sdx == Instance.sNode (Instance.setSec inst sdx)
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)
-- | setBoth stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    (Instance.pNode updated, Instance.sNode updated) == (pdx, sdx)
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          updated = Instance.setBoth inst pdx sdx
-- | An instance whose textual run state is in 'runningStates' must
-- report as running.
-- NOTE(review): the 'in' separators and the 'Types.Ok' case branch of
-- this group of properties are not visible in this view.
prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    run_tx `elem` Instance.runningStates ==> run_st

-- | ...and conversely, an unknown run state must not report running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    run_tx `notElem` Instance.runningStates ==> not run_st

-- | Shrinking memory on a large-enough instance removes one memory unit.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
            Instance.mem inst' == Instance.mem inst - Types.unitMem
-- | Shrinking memory below one remaining unit must fail.
prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
        Types.isBad (Instance.shrinkByType inst Types.FailMem)
-- | Shrinking vcpus on a large-enough instance removes one cpu unit.
-- NOTE(review): the 'Types.Ok inst'' case branch is not visible here.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
            Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
-- | Shrinking vcpus below one remaining unit must fail.
prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
        Types.isBad (Instance.shrinkByType inst Types.FailCPU)
-- | Shrinking disk on a large-enough instance removes one disk unit.
-- NOTE(review): the 'Types.Ok inst'' case branch is not visible here.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
            Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
-- | Shrinking disk below one remaining unit must fail.
prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
        Types.isBad (Instance.shrinkByType inst Types.FailDisk)
-- | setMovable stores the movability flag we gave it.
prop_Instance_setMovable inst m =
    m == Instance.movable (Instance.setMovable inst m)
-- Batch entries for the Instance tests.
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
-- Instance text loader tests

-- | Round-trip a hand-built instance text line through 'Text.loadInst'
-- and check the loaded fields; also check that re-using the primary
-- node as secondary (fail1) is rejected.
-- NOTE(review): several lines (field serializations, the rsdx/ndx/tags
-- bindings and the final result expression) are not visible in this view.
prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
               else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, snode, tags]::
               Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, pnode, tags]::
               Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx)
        (Instance.name i == name &&
         Instance.vcpus i == vcpus &&
         Instance.mem i == mem &&
         Instance.pNode i == pdx &&
         Instance.sNode i == (if null snode
                                then Node.noSecondary
-- | Loading an instance line with a wrong field count must fail.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 8 ==>
        isNothing (Text.loadInst (Data.Map.fromList ktn) fields)
-- | Check 'Text.loadNode' against a hand-serialized node line, for both
-- offline and online nodes.
-- NOTE(review): the body of the conv helper, the per-field
-- serializations (tm_s etc.) and the offline/failure branches are not
-- visible in this view.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
            [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Just (name', node) ->
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
-- | Loading a node line with a wrong field count must fail.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==>
        isNothing (Text.loadNode Data.Map.empty fields)
-- | Serializing then re-loading a node is the identity.
prop_Text_NodeLSIdempotent node =
    roundTrip n == Just (Node.name n, n)
    where -- override failN1 to what loadNode returns by default
          n = node { Node.failN1 = True, Node.offline = False }
          roundTrip = Text.loadNode defGroupAssoc
                      . Utils.sepSplit '|'
                      . Text.serializeNode defGroupList
-- Batch entries for the Text loader tests.
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
-- | setAlias changes only the alias, not the name.
prop_Node_setAlias node name =
    Node.alias aliased == name &&
    Node.name aliased == Node.name node
    where _types = (node::Node.Node, name::String)
          aliased = Node.setAlias node name
-- | Simple setter round-trips for offline flag, extra memory and max cpu.
prop_Node_setOffline node status =
    status == Node.offline (Node.setOffline node status)

prop_Node_setXmem node xm =
    xm == Node.xMem (Node.setXmem node xm)

prop_Node_setMcpu node mc =
    mc == Node.mCpu (Node.setMcpu node mc)
-- | Check that an instance add with too high memory or disk will be rejected
-- The instance is first shrunk to fit the node, then the offending
-- resource is restored to its original (too-large) value.
-- NOTE(review): the '==>' separators and the fallthrough case branches
-- of these three properties are not visible in this view.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailMem -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Same, for disk.
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailDisk -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | Same, for vcpus (note: strict '>' against available cpu here).
prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
    case Node.addPri node inst'' of
      Types.OpFail Types.FailCPU -> True
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }
-- | Check that a secondary add with too high memory or disk will be rejected
prop_Node_addSec node inst pdx =
    overCommitted && not (Node.failN1 node) ==>
        isFailure (Node.addSec node inst pdx)
    where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
          memTooBig = Instance.mem inst >= Node.fMem node - Node.rMem node
          dskTooBig = Instance.dsk inst >= Node.fDsk node
          overCommitted = memTooBig || dskTooBig
-- | A small Double ratio used to drive the mdsk property below.
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    -- NOTE(review): the do-header choosing v is not visible in this view.
    return $ SmallRatio v

-- | Check mdsk setting
-- NOTE(review): the binding extracting mx' from the SmallRatio wrapper
-- is not visible in this view.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'

-- | Adding then deleting the same tags leaves the map unchanged.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

-- | Tags already present must each be rejected on re-add.
-- NOTE(review): the non-empty precondition line ('==>') is not visible
-- in this view.
prop_Node_tagMaps_reject tags =
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags
-- | Every default field must have a known header and a known value.
prop_Node_showField node =
    forAll (elements Node.defaultFields) $ \field ->
        let header = fst (Node.showHeader field)
            value = Node.showField node field
        in header /= Types.unknownField && value /= Types.unknownField
-- | Check 'Node.computeGroups': the grouping preserves the total node
-- count, every node in a group carries that group's id, group ids are
-- unique, and a non-empty input yields a non-empty grouping.
prop_Node_computeGroups nodes =
    let ng = Node.computeGroups nodes
        onlyuuid = map fst ng
    in length nodes == sum (map (length . snd) ng) &&
       all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
       length (nub onlyuuid) == length onlyuuid &&
       -- idiom fix: was 'if null nodes then True else not (null ng)'
       (null nodes || not (null ng))
-- Batch entries for the Node tests.
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups

-- | Check that the cluster score is close to zero for a homogeneous cluster
-- NOTE(review): the final 'in score <= threshold' expression is not
-- visible in this view.
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromAssocList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
-- | Check that cluster stats are sane: allocatable disk is non-negative
-- and never exceeds free disk.
prop_CStats_sane node count =
    precondition ==>
        Cluster.csAdsk stats >= 0 &&
        Cluster.csAdsk stats <= Cluster.csFdsk stats
    where precondition = not (Node.offline node) &&
                         not (Node.failN1 node) &&
                         count > 0 &&
                         Node.availDisk node > 0 &&
                         Node.availMem node > 0
          peered = Node.buildPeers node Container.empty
          assocs = zip [1..] (replicate count peered)
                   ::[(Types.Ndx, Node.Node)]
          stats = Cluster.totalResources (Container.fromAssocList assocs)
-- | Check that one instance is allocated correctly, without
-- rebalances needed
-- NOTE(review): throughout the four cluster properties below several
-- lines ('==>' separators, il/rqnodes bindings, Types.Bad/failure
-- branches) are not visible in this view; code reproduced as-is.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && Node.availDisk node > 0
    && Node.availMem node > 0
    let nl = makeSmallCluster node count
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.tryAlloc nl il inst' rqnodes of
         case Cluster.asSolutions as of
           (xnl, xi, _, cv):[] ->
               let il' = Container.add (Instance.idx xi) xi il
                   tbl = Cluster.Table xnl il' cv []
               in not (canBalance tbl True False)

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
    && not (Node.failN1 node)
    let nl = makeSmallCluster node count
    in case Cluster.tieredAlloc nl il inst rqnodes [] of
         Types.Ok (_, _, il', ixes) -> not (null ixes) &&
                                       IntMap.size il' == length ixes

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    let nl = makeSmallCluster node count
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.tryAlloc nl il inst' rqnodes of
         case Cluster.asSolutions as of
           (xnl, xi, _, _):[] ->
               let sdx = Instance.sNode xi
                   il' = Container.add (Instance.idx xi) xi il
               in case Cluster.tryEvac xnl il' [sdx] of

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && not (isNodeBig node 8)
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case Cluster.iterateAlloc nl' il i_templ rqnodes [] of
         Types.Ok (_, xnl, il', _) ->
             let ynl = Container.add (Node.idx hnode) hnode xnl
                 cv = Cluster.compCV ynl
                 tbl = Cluster.Table ynl il' cv []
             in canBalance tbl True False
-- | Checks consistency: instances placed inside one group are fine,
-- instances spanning two groups are reported as split.
prop_ClusterCheckConsistency node inst =
    null (ccheck [(0, sameGroup)]) &&
    null (ccheck [(0, halfPlaced)]) &&
    (not . null $ ccheck [(0, crossGroup)])
    where nl = makeSmallCluster node 3
          [node1, node2, node3] = Container.elems nl
          node3' = node3 { Node.group = 1 }
          nl' = Container.add (Node.idx node3') node3' nl
          sameGroup = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
          halfPlaced = Instance.setBoth inst (Node.idx node1) Node.noSecondary
          crossGroup = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
          ccheck = Cluster.findSplitInstances nl' . Container.fromAssocList
-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
    forAll (choose (0, 100)) $ \icnt ->
    let base = makeSmallCluster node 2
        place (ns, is) _ = assignInstance ns is inst 0 1
        (nodes', insts') = foldl place (base, Container.empty) [1..icnt]
        groups = Cluster.splitCluster nodes' insts'
        instanceCount = sum [Container.size il | (_, (_, il)) <- groups]
        homogeneous (guuid, (gnl, _)) =
            all ((== guuid) . Node.group) (Container.elems gnl)
    in instanceCount == icnt && all homogeneous groups
-- Batch entries for the Cluster tests.
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
-- | Check that opcode serialization is idempotent
-- NOTE(review): the 'J.Error' case branches of these serialization
-- properties are not visible in this view.
prop_OpCodes_serialization op =
    case J.readJSON (J.showJSON op) of
      J.Ok op' -> op == op'
    where _types = op::OpCodes.OpCode

-- Batch entries for the OpCodes tests.
    [ run prop_OpCodes_serialization

-- | Check that (queued) job\/opcode status serialization is idempotent
prop_OpStatus_serialization os =
    case J.readJSON (J.showJSON os) of
      J.Ok os' -> os == os'
    where _types = os::Jobs.OpStatus

prop_JobStatus_serialization js =
    case J.readJSON (J.showJSON js) of
      J.Ok js' -> js == js'
    where _types = js::Jobs.JobStatus

-- Batch entries for the Jobs tests.
    [ run prop_OpStatus_serialization
    , run prop_JobStatus_serialization
-- | Node lookup mirrors a plain map lookup.
prop_Loader_lookupNode ktn inst node =
    let nl = Data.Map.fromList ktn
    in Loader.lookupNode nl inst node == Data.Map.lookup node nl
-- | Instance lookup mirrors a plain map lookup.
prop_Loader_lookupInstance kti inst =
    let il = Data.Map.fromList kti
    in Loader.lookupInstance il inst == Data.Map.lookup inst il
-- | assignIndices gives every node an index and produces dense,
-- zero-based keys.
-- NOTE(review): a branch of the non-empty conditional and part of the
-- final check are not visible in this view.
prop_Loader_assignIndices nodes =
    Data.Map.size nassoc == length nodes &&
    Container.size kt == length nodes &&
    then maximum (IntMap.keys kt) == length nodes - 1
    where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
-- is zero after merging a cluster that has no instances.
-- NOTE(review): the Types.Bad branch and the tail of the final check
-- are not visible in this view.
prop_Loader_mergeData ns =
    let na = Container.fromAssocList $ map (\n -> (Node.idx n, n)) ns
    in case Loader.mergeData [] [] []
            (Loader.emptyCluster {Loader.cdNodes = na}) of
         Types.Ok (Loader.ClusterData _ nl il _) ->
             let nodes = Container.elems nl
                 instances = Container.elems il
             in (sum . map (length . Node.pList)) nodes == 0 &&

-- Batch entries for the Loader tests.
    [ run prop_Loader_lookupNode
    , run prop_Loader_lookupInstance
    , run prop_Loader_assignIndices
    , run prop_Loader_mergeData