36 |
36 |
import Test.QuickCheck.Batch
|
37 |
37 |
import Data.Maybe
|
38 |
38 |
import qualified Data.Map
|
|
39 |
import qualified Data.IntMap as IntMap
|
39 |
40 |
import qualified Ganeti.HTools.CLI as CLI
|
40 |
41 |
import qualified Ganeti.HTools.Cluster as Cluster
|
41 |
42 |
import qualified Ganeti.HTools.Container as Container
|
... | ... | |
48 |
49 |
import qualified Ganeti.HTools.Types as Types
|
49 |
50 |
import qualified Ganeti.HTools.Utils as Utils
|
50 |
51 |
|
|
-- * Constants

-- | Maximum memory (1TiB, somewhat random value).
-- NOTE(review): assumes memory is tracked in MiB, so 1024 * 1024 MiB
-- == 1 TiB — confirm the unit against Node/Instance definitions.
maxMem :: Int
maxMem = 1024 * 1024
... | ... | |
-- | Maximum CPU count (somewhat random value), used as an upper bound
-- when generating node/instance specs.
maxCpu :: Int
maxCpu = 1024
62 |
65 |
|
|
66 |
-- * Helper functions
|
|
67 |
|
63 |
68 |
-- | Simple checker for whether OpResult is fail or pass
|
64 |
69 |
isFailure :: Types.OpResult a -> Bool
|
65 |
70 |
isFailure (Types.OpFail _) = True
|
... | ... | |
-- | Simple checker for whether a result value denotes success.
isOk v = case v of
           Types.Ok _ -> True
           _          -> False
72 |
77 |
|
|
-- | Update an instance so that it is guaranteed smaller than a node:
-- each resource is set to half of what the node currently has available.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem   = Node.availMem  node `div` 2
         , Instance.dsk   = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu  node `div` 2
         }
|
84 |
|
|
-- | Create an instance given its spec (memory, disk, vcpus).
-- The name and run state are fixed placeholders; the two (-1)
-- arguments are presumably "unassigned" primary/secondary node
-- indices — confirm against 'Instance.create'.
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] (-1) (-1)
|
88 |
|
|
-- | Create a small cluster by repeating a node spec @count@ times.
-- Note that all replicas share the node's name; 'Loader.assignIndices'
-- is relied upon to hand out the container indices.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let prepped    = Node.buildPeers node Container.empty
        named      = [ (Node.name n, n) | n <- replicate count prepped ]
        (_, idxed) = Loader.assignIndices named
    in Container.fromAssocList idxed
|
96 |
|
|
-- | Checks if a node is "big" enough, i.e. has more than @size@
-- allocation units available for every resource (disk, memory, cpu).
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem  node > size * Types.unitMem
        , Node.availCpu  node > size * Types.unitCpu
        ]
|
102 |
|
|
-- | Whether 'Cluster.tryBalance' can find an improving move for the
-- given table ('isJust' on its result). @dm@ and @evac@ are passed
-- through unchanged — presumably disk-move and evacuation-mode flags;
-- confirm against 'Cluster.tryBalance'.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac = isJust $ Cluster.tryBalance tbl dm evac
|
105 |
|
|
106 |
-- * Arbitrary instances
|
|
107 |
|
73 |
108 |
-- copied from the introduction to quickcheck
|
74 |
109 |
instance Arbitrary Char where
|
75 |
110 |
arbitrary = choose ('\32', '\128')
|
... | ... | |
105 |
140 |
n' = Node.buildPeers n Container.empty
|
106 |
141 |
return n'
|
107 |
142 |
|
-- NOTE(review): this is a duplicate of 'setInstanceSmallerThanNode'
-- defined earlier in the file; one of the two copies should be removed.
-- | Update an instance to hold half of the node's available memory,
-- disk and vcpus, so it is guaranteed to fit on that node.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = (Node.availMem node) `div` 2
         , Instance.dsk = (Node.availDisk node) `div` 2
         , Instance.vcpus = (Node.availCpu node) `div` 2
         }
|
143 |
-- * Actual tests
|
113 |
144 |
|
114 |
145 |
-- | Make sure add is idempotent
|
115 |
146 |
prop_PeerMap_addIdempotent pmap key em =
|
... | ... | |
394 |
425 |
-- | Check that cluster stats are sane
|
395 |
426 |
prop_CStats_sane node count =
|
396 |
427 |
(not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
|
397 |
|
(Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
|
|
428 |
(Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
|
398 |
429 |
let fn = Node.buildPeers node Container.empty
|
399 |
430 |
nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
|
400 |
431 |
nl = Container.fromAssocList nlst
|
... | ... | |
402 |
433 |
in Cluster.csAdsk cstats >= 0 &&
|
403 |
434 |
Cluster.csAdsk cstats <= Cluster.csFdsk cstats
|
404 |
435 |
|
|
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && Node.availDisk node > 0
    && Node.availMem node > 0
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        rqnodes = 2
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.tryAlloc nl il inst' rqnodes of
         Types.Bad _ -> False
         -- the error list was previously bound as 'errs' but never
         -- used (unused-binding warning); we only care that exactly
         -- one solution exists and that no rebalance is then possible
         Types.Ok (_, _, sols3) ->
             case sols3 of
               [] -> False
               [(_, (xnl, xi, _))] ->
                   let cv = Cluster.compCV xnl
                       il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True False)
               _ -> False
|
460 |
|
|
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node) &&
    not (Node.failN1 node) &&
    isNodeBig node 4
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        alloc = Cluster.tieredAlloc nl il inst rqnodes []
    in case alloc of
         Types.Bad _ -> False
         -- success means at least one instance index was allocated
         Types.Ok (_, _, ixes) -> not (null ixes)
|
476 |
|
|
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && isNodeBig node 4
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        rqnodes = 2
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.tryAlloc nl il inst' rqnodes of
         Types.Bad _ -> False
         -- the error list was previously bound as 'errs' but never
         -- used (unused-binding warning); only the solution list matters
         Types.Ok (_, _, sols3) ->
             case sols3 of
               [] -> False
               [(_, (xnl, xi, _))] ->
                   -- after allocating, try to evacuate the instance's
                   -- secondary node; 'isJust' replaces the manual
                   -- Just/_ -> True/False case
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in isJust (Cluster.tryEvac xnl il' [sdx])
               _ -> False
|
501 |
|
|
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) &&
    not (Node.failN1 node) &&
    isNodeBig node 4 &&
    not (isNodeBig node 8)
    ==>
    let nl = makeSmallCluster node count
        -- hold back the highest-indexed node for later re-insertion
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        rqnodes = 2
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case Cluster.iterateAlloc nl' il i_templ rqnodes [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, insts) ->
             -- re-add the held-back node and check the balancer can
             -- now improve the cluster
             let ynl = Container.add (Node.idx hnode) hnode xnl
                 cv = Cluster.compCV ynl
                 -- NOTE(review): lazy foldl builds thunks; foldl' would
                 -- be preferable but needs a Data.List import — confirm
                 addInst l i = Container.add (Instance.idx i) i l
                 il' = foldl addInst il insts
                 tbl = Cluster.Table ynl il' cv []
             in canBalance tbl True False
|
526 |
|
-- | The list of cluster property tests, in QuickCheck 'Batch' format.
-- Each property must be wrapped in 'run' individually (they have
-- different argument types, so a plain @map run@ would not typecheck).
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    ]