1 {-# LANGUAGE TemplateHaskell #-}
3 {-| Unittests for ganeti-htools.
9 Copyright (C) 2009, 2010, 2011, 2012 Google Inc.
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2 of the License, or
14 (at your option) any later version.
16 This program is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
28 module Ganeti.HTools.QC
44 import Test.QuickCheck
45 import Text.Printf (printf)
46 import Data.List (findIndex, intercalate, nub, isPrefixOf)
47 import qualified Data.Set as Set
50 import qualified System.Console.GetOpt as GetOpt
51 import qualified Text.JSON as J
52 import qualified Data.Map
53 import qualified Data.IntMap as IntMap
55 import qualified Ganeti.OpCodes as OpCodes
56 import qualified Ganeti.Jobs as Jobs
57 import qualified Ganeti.Luxi
58 import qualified Ganeti.HTools.CLI as CLI
59 import qualified Ganeti.HTools.Cluster as Cluster
60 import qualified Ganeti.HTools.Container as Container
61 import qualified Ganeti.HTools.ExtLoader
62 import qualified Ganeti.HTools.IAlloc as IAlloc
63 import qualified Ganeti.HTools.Instance as Instance
64 import qualified Ganeti.HTools.JSON as JSON
65 import qualified Ganeti.HTools.Loader as Loader
66 import qualified Ganeti.HTools.Luxi
67 import qualified Ganeti.HTools.Node as Node
68 import qualified Ganeti.HTools.Group as Group
69 import qualified Ganeti.HTools.PeerMap as PeerMap
70 import qualified Ganeti.HTools.Rapi
71 import qualified Ganeti.HTools.Simu as Simu
72 import qualified Ganeti.HTools.Text as Text
73 import qualified Ganeti.HTools.Types as Types
74 import qualified Ganeti.HTools.Utils as Utils
75 import qualified Ganeti.HTools.Version
76 import qualified Ganeti.Constants as C
78 import qualified Ganeti.HTools.Program as Program
79 import qualified Ganeti.HTools.Program.Hail
80 import qualified Ganeti.HTools.Program.Hbal
81 import qualified Ganeti.HTools.Program.Hscan
82 import qualified Ganeti.HTools.Program.Hspace
84 import Ganeti.HTools.QCHelper (testSuite)
88 -- | Maximum memory (1TiB, somewhat random value).
92 -- | Maximum disk (8TiB, somewhat random value).
94 maxDsk = 1024 * 1024 * 8
96 -- | Max CPUs (1024, somewhat random value).
100 -- | Max vcpu ratio (random value).
101 maxVcpuRatio :: Double
102 maxVcpuRatio = 1024.0
104 -- | Max spindle ratio (random value).
105 maxSpindleRatio :: Double
106 maxSpindleRatio = 1024.0
108 -- | All disk templates (used later)
109 allDiskTemplates :: [Types.DiskTemplate]
110 allDiskTemplates = [minBound..maxBound]
112 -- | Null iPolicy, and by null we mean very liberal.
113 nullIPolicy = Types.IPolicy
114 { Types.iPolicyMinSpec = Types.ISpec { Types.iSpecMemorySize = 0
115 , Types.iSpecCpuCount = 0
116 , Types.iSpecDiskSize = 0
117 , Types.iSpecDiskCount = 0
118 , Types.iSpecNicCount = 0
120 , Types.iPolicyMaxSpec = Types.ISpec { Types.iSpecMemorySize = maxBound
121 , Types.iSpecCpuCount = maxBound
122 , Types.iSpecDiskSize = maxBound
123 , Types.iSpecDiskCount = C.maxDisks
124 , Types.iSpecNicCount = C.maxNics
126 , Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
127 , Types.iSpecCpuCount = Types.unitCpu
128 , Types.iSpecDiskSize = Types.unitDsk
129 , Types.iSpecDiskCount = 1
130 , Types.iSpecNicCount = 1
132 , Types.iPolicyDiskTemplates = [Types.DTDrbd8, Types.DTPlain]
133 , Types.iPolicyVcpuRatio = maxVcpuRatio -- somewhat random value, high
134 -- enough to not impact us
135 , Types.iPolicySpindleRatio = maxSpindleRatio
139 defGroup :: Group.Group
140 defGroup = flip Group.setIdx 0 $
141 Group.create "default" Types.defaultGroupID Types.AllocPreferred
-- | The default group list, containing just 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]
-- | Map from the default group's UUID to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
150 -- * Helper functions
152 -- | Simple checker for whether OpResult is fail or pass.
153 isFailure :: Types.OpResult a -> Bool
154 isFailure (Types.OpFail _) = True
-- | Equality check that, on failure, annotates the property with both
-- compared values.
(==?) :: (Show a, Eq a) => a -> a -> Property
(==?) x y = printTestCase failmsg (x == y)
  where failmsg = concat [ "Expected equality, but '", show x
                         , "' /= '", show y, "'" ]
-- | Show a message and unconditionally fail the test.
failTest :: String -> Property
failTest = flip printTestCase False
168 -- | Update an instance to be smaller than a node.
169 setInstanceSmallerThanNode node inst =
170 inst { Instance.mem = Node.availMem node `div` 2
171 , Instance.dsk = Node.availDisk node `div` 2
172 , Instance.vcpus = Node.availCpu node `div` 2
175 -- | Create an instance given its spec.
176 createInstance mem dsk vcpus =
177 Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
180 -- | Create a small cluster by repeating a node spec.
181 makeSmallCluster :: Node.Node -> Int -> Node.List
182 makeSmallCluster node count =
183 let origname = Node.name node
184 origalias = Node.alias node
185 nodes = map (\idx -> node { Node.name = origname ++ "-" ++ show idx
186 , Node.alias = origalias ++ "-" ++ show idx })
188 fn = flip Node.buildPeers Container.empty
189 namelst = map (\n -> (Node.name n, fn n)) nodes
190 (_, nlst) = Loader.assignIndices namelst
-- | Make a small cluster, both nodes and instances: returns the node
-- list, an empty instance list, and the instance shrunk to fit a node.
makeSmallEmptyCluster :: Node.Node -> Int -> Instance.Instance
                      -> (Node.List, Instance.List, Instance.Instance)
makeSmallEmptyCluster node count inst =
  (nodelist, Container.empty, smallinst)
  where nodelist = makeSmallCluster node count
        smallinst = setInstanceSmallerThanNode node inst
-- | Checks if a node is "big" enough, i.e. has more than @size@ units
-- of each resource (disk, memory, cpu) still available.
isNodeBig :: Int -> Node.Node -> Bool
isNodeBig size node =
  and [ Node.availDisk node > size * Types.unitDsk
      , Node.availMem node > size * Types.unitMem
      , Node.availCpu node > size * Types.unitCpu
      ]
-- | Checks whether 'Cluster.tryBalance' finds at least one balancing
-- move for the given table, under the given disk-move\/instance-move\/
-- evacuation flags.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0
209 -- | Assigns a new fresh instance to a cluster; this is not
210 -- allocation, so no resource checks are done.
211 assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
212 Types.Idx -> Types.Idx ->
213 (Node.List, Instance.List)
214 assignInstance nl il inst pdx sdx =
215 let pnode = Container.find pdx nl
216 snode = Container.find sdx nl
217 maxiidx = if Container.null il
219 else fst (Container.findMax il) + 1
220 inst' = inst { Instance.idx = maxiidx,
221 Instance.pNode = pdx, Instance.sNode = sdx }
222 pnode' = Node.setPri pnode inst'
223 snode' = Node.setSec snode inst'
224 nl' = Container.addTwo pdx pnode' sdx snode' nl
225 il' = Container.add maxiidx inst' il
228 -- | Generates a list of a given size with non-duplicate elements.
229 genUniquesList :: (Eq a, Arbitrary a) => Int -> Gen [a]
232 newelem <- arbitrary `suchThat` (`notElem` lst)
233 return (newelem:lst)) [] [1..cnt]
235 -- | Checks if an instance is mirrored.
236 isMirrored :: Instance.Instance -> Bool
238 (/= Types.MirrorNone) . Types.templateMirrorType . Instance.diskTemplate
-- | Returns the possible change node types for a disk template.
evacModeOptions :: Types.MirrorType -> [Types.EvacMode]
evacModeOptions mt =
  case mt of
    Types.MirrorNone     -> []
    Types.MirrorInternal -> [minBound..maxBound] -- DRBD can do all
    Types.MirrorExternal -> [Types.ChangePrimary, Types.ChangeAll]
246 -- * Arbitrary instances
248 -- | Defines a DNS name.
249 newtype DNSChar = DNSChar { dnsGetChar::Char }
251 instance Arbitrary DNSChar where
253 x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
256 -- | Generates a single name component.
257 getName :: Gen String
260 dn <- vector n::Gen [DNSChar]
261 return (map dnsGetChar dn)
263 -- | Generates an entire FQDN.
264 getFQDN :: Gen String
266 ncomps <- choose (1, 4)
267 names <- mapM (const getName) [1..ncomps::Int]
268 return $ intercalate "." names
270 -- | Defines a tag type.
271 newtype TagChar = TagChar { tagGetChar :: Char }
273 -- | All valid tag chars. This doesn't need to match _exactly_
274 -- Ganeti's own tag regex, just enough for it to be close.
276 tagChar = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] ++ ".+*/:@-"
278 instance Arbitrary TagChar where
280 c <- elements tagChar
284 genTag :: Gen [TagChar]
286 -- the correct value would be C.maxTagLen, but that's way too
287 -- verbose in unittests, and at the moment I don't see any possible
288 -- bugs with longer tags and the way we use tags in htools
292 -- | Generates a list of tags (correctly upper bounded).
293 genTags :: Gen [String]
295 -- the correct value would be C.maxTagsPerObj, but per the comment
296 -- in genTag, we don't use tags enough in htools to warrant testing
298 n <- choose (0, 10::Int)
299 tags <- mapM (const genTag) [1..n]
300 return $ map (map tagGetChar) tags
-- | Any of the defined instance statuses is a valid arbitrary value.
instance Arbitrary Types.InstanceStatus where
  arbitrary = elements [minBound..maxBound]
305 -- | Generates a random instance with maximum disk/mem/cpu values.
306 genInstanceSmallerThan :: Int -> Int -> Int -> Gen Instance.Instance
307 genInstanceSmallerThan lim_mem lim_dsk lim_cpu = do
309 mem <- choose (0, lim_mem)
310 dsk <- choose (0, lim_dsk)
314 vcpus <- choose (0, lim_cpu)
315 return $ Instance.create name mem dsk vcpus run_st [] True pn sn
-- | Generates an instance that fits within half of a node's currently
-- available memory, disk and cpu.
genInstanceSmallerThanNode :: Node.Node -> Gen Instance.Instance
genInstanceSmallerThanNode node =
  let halve = (`div` 2)
  in genInstanceSmallerThan (halve (Node.availMem node))
                            (halve (Node.availDisk node))
                            (halve (Node.availCpu node))
-- | A random instance, bounded by the module-wide max mem/disk/cpu
-- constants.
instance Arbitrary Instance.Instance where
  arbitrary = genInstanceSmallerThan maxMem maxDsk maxCpu
-- | Generates an arbitrary node based on sizing information.
330 genNode :: Maybe Int -- ^ Minimum node size in terms of units
331 -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
332 -- just by the max... constants)
334 genNode min_multiplier max_multiplier = do
335 let (base_mem, base_dsk, base_cpu) =
336 case min_multiplier of
337 Just mm -> (mm * Types.unitMem,
341 (top_mem, top_dsk, top_cpu) =
342 case max_multiplier of
343 Just mm -> (mm * Types.unitMem,
346 Nothing -> (maxMem, maxDsk, maxCpu)
348 mem_t <- choose (base_mem, top_mem)
349 mem_f <- choose (base_mem, mem_t)
350 mem_n <- choose (0, mem_t - mem_f)
351 dsk_t <- choose (base_dsk, top_dsk)
352 dsk_f <- choose (base_dsk, dsk_t)
353 cpu_t <- choose (base_cpu, top_cpu)
355 let n = Node.create name (fromIntegral mem_t) mem_n mem_f
356 (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 1 0
357 n' = Node.setPolicy nullIPolicy n
358 return $ Node.buildPeers n' Container.empty
360 -- | Helper function to generate a sane node.
361 genOnlineNode :: Gen Node.Node
363 arbitrary `suchThat` (\n -> not (Node.offline n) &&
364 not (Node.failN1 n) &&
365 Node.availDisk n > 0 &&
366 Node.availMem n > 0 &&
-- | A random node, with no lower or upper size bound (see 'genNode').
instance Arbitrary Node.Node where
  arbitrary = genNode Nothing Nothing
-- | Any of the replace-disks modes is a valid arbitrary value.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [minBound..maxBound]
377 instance Arbitrary OpCodes.OpCode where
379 op_id <- elements [ "OP_TEST_DELAY"
380 , "OP_INSTANCE_REPLACE_DISKS"
381 , "OP_INSTANCE_FAILOVER"
382 , "OP_INSTANCE_MIGRATE"
386 liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
387 "OP_INSTANCE_REPLACE_DISKS" ->
388 liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
389 arbitrary arbitrary arbitrary
390 "OP_INSTANCE_FAILOVER" ->
391 liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
393 "OP_INSTANCE_MIGRATE" ->
394 liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
395 arbitrary arbitrary arbitrary
396 _ -> fail "Wrong opcode"
-- | Any of the opcode statuses is a valid arbitrary value.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]
-- | Any of the job statuses is a valid arbitrary value.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]
404 newtype SmallRatio = SmallRatio Double deriving Show
405 instance Arbitrary SmallRatio where
408 return $ SmallRatio v
-- | Any of the allocation policies is a valid arbitrary value.
instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]
-- | Any of the disk templates is a valid arbitrary value.
instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]
-- | Any of the failure modes is a valid arbitrary value.
instance Arbitrary Types.FailMode where
  arbitrary = elements [minBound..maxBound]
-- | Any of the evacuation modes is a valid arbitrary value.
instance Arbitrary Types.EvacMode where
  arbitrary = elements [minBound..maxBound]
422 instance Arbitrary a => Arbitrary (Types.OpResult a) where
423 arbitrary = arbitrary >>= \c ->
425 then liftM Types.OpGood arbitrary
426 else liftM Types.OpFail arbitrary
428 instance Arbitrary Types.ISpec where
430 mem_s <- arbitrary::Gen (NonNegative Int)
431 dsk_c <- arbitrary::Gen (NonNegative Int)
432 dsk_s <- arbitrary::Gen (NonNegative Int)
433 cpu_c <- arbitrary::Gen (NonNegative Int)
434 nic_c <- arbitrary::Gen (NonNegative Int)
435 return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
436 , Types.iSpecCpuCount = fromIntegral cpu_c
437 , Types.iSpecDiskSize = fromIntegral dsk_s
438 , Types.iSpecDiskCount = fromIntegral dsk_c
439 , Types.iSpecNicCount = fromIntegral nic_c
442 -- | Generates an ispec bigger than the given one.
443 genBiggerISpec :: Types.ISpec -> Gen Types.ISpec
444 genBiggerISpec imin = do
445 mem_s <- choose (Types.iSpecMemorySize imin, maxBound)
446 dsk_c <- choose (Types.iSpecDiskCount imin, maxBound)
447 dsk_s <- choose (Types.iSpecDiskSize imin, maxBound)
448 cpu_c <- choose (Types.iSpecCpuCount imin, maxBound)
449 nic_c <- choose (Types.iSpecNicCount imin, maxBound)
450 return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
451 , Types.iSpecCpuCount = fromIntegral cpu_c
452 , Types.iSpecDiskSize = fromIntegral dsk_s
453 , Types.iSpecDiskCount = fromIntegral dsk_c
454 , Types.iSpecNicCount = fromIntegral nic_c
457 instance Arbitrary Types.IPolicy where
460 istd <- genBiggerISpec imin
461 imax <- genBiggerISpec istd
462 num_tmpl <- choose (0, length allDiskTemplates)
463 dts <- genUniquesList num_tmpl
464 vcpu_ratio <- choose (1.0, maxVcpuRatio)
465 spindle_ratio <- choose (1.0, maxSpindleRatio)
466 return Types.IPolicy { Types.iPolicyMinSpec = imin
467 , Types.iPolicyStdSpec = istd
468 , Types.iPolicyMaxSpec = imax
469 , Types.iPolicyDiskTemplates = dts
470 , Types.iPolicyVcpuRatio = vcpu_ratio
471 , Types.iPolicySpindleRatio = spindle_ratio
-- | Helper to generate a small string that doesn't contain commas
-- (and is therefore safe for join/split round-trips).
genNonCommaString :: Gen String
genNonCommaString = do
  size <- choose (0, 20) -- arbitrary max size
  vectorOf size (arbitrary `suchThat` (/= ','))
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
-- ([""] is excluded because commaJoin of it is "", which splits back
-- to [], not [""].)
prop_Utils_commaJoinSplit =
  forAll (choose (0, 20)) $ \llen ->
  forAll (vectorOf llen genNonCommaString `suchThat` ((/=) [""])) $ \lst ->
  Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst
-- | Split and join should always be idempotent (the reverse direction
-- of 'prop_Utils_commaJoinSplit', which needs no preconditions).
prop_Utils_commaSplitJoin s =
  Utils.commaJoin (Utils.sepSplit ',' s) ==? s
-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- type for the value.
prop_Utils_fromObjWithDefault def_value random_key =
  -- a missing key will be returned with the default
  JSON.fromObjWithDefault [] random_key def_value == Just def_value &&
  -- a found key will be returned as is, not with default
  JSON.fromObjWithDefault [(random_key, J.showJSON def_value)]
    random_key (def_value+1) == Just def_value
  where _types = def_value :: Integer
504 -- | Test that functional if' behaves like the syntactic sugar if.
505 prop_Utils_if'if :: Bool -> Int -> Int -> Gen Prop
506 prop_Utils_if'if cnd a b =
507 Utils.if' cnd a b ==? if cnd then a else b
-- | Test basic select functionality: the first True-tagged value wins,
-- falling back to the default when no value is True-tagged.
prop_Utils_select :: Int -- ^ Default result
                  -> [Int] -- ^ List of False values
                  -> [Int] -- ^ List of True values
                  -> Gen Prop -- ^ Test result
prop_Utils_select def lst1 lst2 =
  Utils.select def (falses ++ trues) ==? expected
  where expected = Utils.if' (null lst2) def (head lst2)
        falses = map ((,) False) lst1
        trues = map ((,) True) lst2
-- | Test basic select functionality with undefined default: since a
-- True-tagged value always exists, the default must never be forced.
prop_Utils_select_undefd :: [Int] -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop -- ^ Test result
prop_Utils_select_undefd lst1 (NonEmpty lst2) =
  Utils.select undefined (falses ++ trues) ==? head lst2
  where falses = map ((,) False) lst1
        trues = map ((,) True) lst2
-- | Test basic select functionality with undefined list values: the
-- trailing undefined entry must never be examined, since a True-tagged
-- value precedes it.
prop_Utils_select_undefv :: [Int] -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop -- ^ Test result
prop_Utils_select_undefv lst1 (NonEmpty lst2) =
  Utils.select undefined scanlist ==? head lst2
  where falses = map ((,) False) lst1
        trues = map ((,) True) lst2
        scanlist = falses ++ trues ++ [undefined]
-- | Test that parseUnit handles bare numbers, binary suffixes (m, g,
-- t), decimal suffixes (M), and rejects unknown suffixes.
prop_Utils_parseUnit (NonNegative n) =
  Utils.parseUnit (show n) == Types.Ok n &&
  Utils.parseUnit (show n ++ "m") == Types.Ok n &&
  (case Utils.parseUnit (show n ++ "M") of
     Types.Ok m -> if n > 0
                     then m < n -- for positive values, X MB is < than X MiB
                     else m == 0 -- but for 0, 0 MB == 0 MiB
     Types.Bad _ -> False) &&
  Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
  Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
  Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
  where _types = n::Int
552 -- | Test list for the Utils module.
554 [ 'prop_Utils_commaJoinSplit
555 , 'prop_Utils_commaSplitJoin
556 , 'prop_Utils_fromObjWithDefault
559 , 'prop_Utils_select_undefd
560 , 'prop_Utils_select_undefv
561 , 'prop_Utils_parseUnit
-- | Make sure add is idempotent on a normalized map.
prop_PeerMap_addIdempotent pmap key em =
  addOnce normalized ==? addOnce (addOnce normalized)
  where _types = (pmap::PeerMap.PeerMap,
                  key::PeerMap.Key, em::PeerMap.Elem)
        addOnce = PeerMap.add key em
        normalized = PeerMap.accumArray const pmap
-- | Make sure remove is idempotent on a normalized map.
prop_PeerMap_removeIdempotent pmap key =
  removeOnce normalized ==? removeOnce (removeOnce normalized)
  where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
        removeOnce = PeerMap.remove key
        normalized = PeerMap.accumArray const pmap
-- | Make sure looking up a missing item returns 0.
prop_PeerMap_findMissing pmap key =
  PeerMap.find key (PeerMap.remove key normalized) ==? 0
  where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
        normalized = PeerMap.accumArray const pmap
-- | Make sure an added item can be found again.
prop_PeerMap_addFind pmap key em =
  PeerMap.find key (PeerMap.add key em normalized) ==? em
  where _types = (pmap::PeerMap.PeerMap,
                  key::PeerMap.Key, em::PeerMap.Elem)
        normalized = PeerMap.accumArray const pmap
-- | Manual check that maxElem returns the maximum indeed, or 0 for an
-- empty map.
prop_PeerMap_maxElem pmap =
  PeerMap.maxElem normalized ==? expected
  where _types = pmap::PeerMap.PeerMap
        normalized = PeerMap.accumArray const pmap
        expected = if null normalized
                     then 0
                     else maximum (map snd normalized)
601 -- | List of tests for the PeerMap module.
603 [ 'prop_PeerMap_addIdempotent
604 , 'prop_PeerMap_removeIdempotent
605 , 'prop_PeerMap_maxElem
606 , 'prop_PeerMap_addFind
607 , 'prop_PeerMap_findMissing
610 -- ** Container tests
612 -- we silence the following due to hlint bug fixed in later versions
613 {-# ANN prop_Container_addTwo "HLint: ignore Avoid lambda" #-}
614 prop_Container_addTwo cdata i1 i2 =
615 fn i1 i2 cont == fn i2 i1 cont &&
616 fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
617 where _types = (cdata::[Int],
619 cont = foldl (\c x -> Container.add x x c) Container.empty cdata
620 fn x1 x2 = Container.addTwo x1 x1 x2 x2
-- | nameOf must return the name of the node stored at a given index.
prop_Container_nameOf node =
  Container.nameOf cluster (Node.idx solenode) ==? Node.name solenode
  where cluster = makeSmallCluster node 1
        solenode = head (Container.elems cluster)
627 -- | We test that in a cluster, given a random node, we can find it by
628 -- its name and alias, as long as all names and aliases are unique,
629 -- and that we fail to find a non-existing name.
630 prop_Container_findByName node =
631 forAll (choose (1, 20)) $ \ cnt ->
632 forAll (choose (0, cnt - 1)) $ \ fidx ->
633 forAll (genUniquesList (cnt * 2)) $ \ allnames ->
634 forAll (arbitrary `suchThat` (`notElem` allnames)) $ \ othername ->
635 let names = zip (take cnt allnames) (drop cnt allnames)
636 nl = makeSmallCluster node cnt
637 nodes = Container.elems nl
638 nodes' = map (\((name, alias), nn) -> (Node.idx nn,
639 nn { Node.name = name,
640 Node.alias = alias }))
642 nl' = Container.fromList nodes'
643 target = snd (nodes' !! fidx)
644 in Container.findByName nl' (Node.name target) == Just target &&
645 Container.findByName nl' (Node.alias target) == Just target &&
646 isNothing (Container.findByName nl' othername)
648 testSuite "Container"
649 [ 'prop_Container_addTwo
650 , 'prop_Container_nameOf
651 , 'prop_Container_findByName
656 -- Simple instance tests, we only have setter/getters
-- | A freshly created instance has its alias equal to its name.
prop_Instance_creat inst =
  Instance.name inst ==? Instance.alias inst
-- | setIdx must be reflected by the idx getter.
prop_Instance_setIdx inst idx =
  Instance.idx (Instance.setIdx inst idx) ==? idx
  where _types = (inst::Instance.Instance, idx::Types.Idx)
-- | setName must update both the name and the alias.
prop_Instance_setName inst name =
  Instance.name renamed == name &&
  Instance.alias renamed == name
  where _types = (inst::Instance.Instance, name::String)
        renamed = Instance.setName inst name
-- | setAlias must update the alias while leaving the name untouched.
prop_Instance_setAlias inst name =
  Instance.name aliased == Instance.name inst &&
  Instance.alias aliased == name
  where _types = (inst::Instance.Instance, name::String)
        aliased = Instance.setAlias inst name
-- | setPri must be reflected by the pNode getter.
prop_Instance_setPri inst pdx =
  Instance.pNode (Instance.setPri inst pdx) ==? pdx
  where _types = (inst::Instance.Instance, pdx::Types.Ndx)
-- | setSec must be reflected by the sNode getter.
prop_Instance_setSec inst sdx =
  Instance.sNode (Instance.setSec inst sdx) ==? sdx
  where _types = (inst::Instance.Instance, sdx::Types.Ndx)
-- | setBoth must set the primary and secondary node in one go.
prop_Instance_setBoth inst pdx sdx =
  Instance.pNode updated == pdx && Instance.sNode updated == sdx
  where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
        updated = Instance.setBoth inst pdx sdx
690 prop_Instance_shrinkMG inst =
691 Instance.mem inst >= 2 * Types.unitMem ==>
692 case Instance.shrinkByType inst Types.FailMem of
693 Types.Ok inst' -> Instance.mem inst' == Instance.mem inst - Types.unitMem
-- | Shrinking by memory must fail when the instance has less than two
-- memory units.
prop_Instance_shrinkMF inst =
  forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let small = inst { Instance.mem = mem }
    in Types.isBad $ Instance.shrinkByType small Types.FailMem
701 prop_Instance_shrinkCG inst =
702 Instance.vcpus inst >= 2 * Types.unitCpu ==>
703 case Instance.shrinkByType inst Types.FailCPU of
705 Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
-- | Shrinking by cpu must fail when the instance has less than two cpu
-- units.
prop_Instance_shrinkCF inst =
  forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU
713 prop_Instance_shrinkDG inst =
714 Instance.dsk inst >= 2 * Types.unitDsk ==>
715 case Instance.shrinkByType inst Types.FailDisk of
717 Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
-- | Shrinking by disk must fail when the instance has less than two
-- disk units.
prop_Instance_shrinkDF inst =
  forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk
-- | setMovable must be reflected by the movable getter.
prop_Instance_setMovable inst m =
  Instance.movable inst' ==? m
  where inst' = Instance.setMovable inst m
730 [ 'prop_Instance_creat
731 , 'prop_Instance_setIdx
732 , 'prop_Instance_setName
733 , 'prop_Instance_setAlias
734 , 'prop_Instance_setPri
735 , 'prop_Instance_setSec
736 , 'prop_Instance_setBoth
737 , 'prop_Instance_shrinkMG
738 , 'prop_Instance_shrinkMF
739 , 'prop_Instance_shrinkCG
740 , 'prop_Instance_shrinkCF
741 , 'prop_Instance_shrinkDG
742 , 'prop_Instance_shrinkDF
743 , 'prop_Instance_setMovable
748 -- *** Text backend tests
750 -- Instance text loader tests
752 prop_Text_Load_Instance name mem dsk vcpus status
753 (NonEmpty pnode) snode
754 (NonNegative pdx) (NonNegative sdx) autobal dt =
755 pnode /= snode && pdx /= sdx ==>
756 let vcpus_s = show vcpus
759 status_s = Types.instanceStatusToRaw status
762 else [(pnode, pdx), (snode, sdx)]
763 nl = Data.Map.fromList ndx
765 sbal = if autobal then "Y" else "N"
766 sdt = Types.diskTemplateToRaw dt
767 inst = Text.loadInst nl
768 [name, mem_s, dsk_s, vcpus_s, status_s,
769 sbal, pnode, snode, sdt, tags]
770 fail1 = Text.loadInst nl
771 [name, mem_s, dsk_s, vcpus_s, status_s,
772 sbal, pnode, pnode, tags]
773 _types = ( name::String, mem::Int, dsk::Int
774 , vcpus::Int, status::Types.InstanceStatus
778 Types.Bad msg -> failTest $ "Failed to load instance: " ++ msg
779 Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
780 \ loading the instance" $
781 Instance.name i == name &&
782 Instance.vcpus i == vcpus &&
783 Instance.mem i == mem &&
784 Instance.pNode i == pdx &&
785 Instance.sNode i == (if null snode
786 then Node.noSecondary
788 Instance.autoBalance i == autobal &&
-- | Loading an instance from a field list of the wrong length must
-- fail with the expected error message.
prop_Text_Load_InstanceFail ktn fields =
  length fields /= 10 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> failTest "Managed to load instance from invalid data"
      Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                       "Invalid/incomplete instance data: '" `isPrefixOf` msg
  where nl = Data.Map.fromList ktn
799 prop_Text_Load_Node name tm nm fm td fd tc fo =
800 let conv v = if v < 0
812 any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
813 gid = Group.uuid defGroup
814 in case Text.loadNode defGroupAssoc
815 [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
817 Just (name', node) ->
819 then Node.offline node
820 else Node.name node == name' && name' == name &&
821 Node.alias node == name &&
822 Node.tMem node == fromIntegral tm &&
823 Node.nMem node == nm &&
824 Node.fMem node == fm &&
825 Node.tDsk node == fromIntegral td &&
826 Node.fDsk node == fd &&
827 Node.tCpu node == fromIntegral tc
-- | Loading a node from a field list of the wrong length must fail.
prop_Text_Load_NodeFail fields =
  length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
-- | Node serialise + load must round-trip to the same node.
prop_Text_NodeLSIdempotent node =
  (Text.loadNode defGroupAssoc .
   Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==?
  Just (Node.name n, n)
  -- override failN1 to what loadNode returns by default
  where n = Node.setPolicy Types.defIPolicy $
            node { Node.failN1 = True, Node.offline = False }
-- | ISpec serialise + load must round-trip to the same ispec.
prop_Text_ISpecIdempotent ispec =
  case Text.loadISpec "dummy" . Utils.sepSplit ',' .
       Text.serializeISpec $ ispec of
    Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
    Types.Ok ispec' -> ispec ==? ispec'
-- | IPolicy serialise + load must round-trip to the same ipolicy.
prop_Text_IPolicyIdempotent ipol =
  case Text.loadIPolicy . Utils.sepSplit '|' $
       Text.serializeIPolicy owner ipol of
    -- note: error message fixed from "ispec" (copy-paste from the
    -- ISpec test above); this property loads an ipolicy
    Types.Bad msg -> failTest $ "Failed to load ipolicy: " ++ msg
    Types.Ok res -> (owner, ipol) ==? res
  where owner = "dummy"
853 -- | This property, while being in the text tests, does more than just
854 -- test end-to-end the serialisation and loading back workflow; it
-- also tests the Loader.mergeData and the actual
856 -- Cluster.iterateAlloc (for well-behaving w.r.t. instance
857 -- allocations, not for the business logic). As such, it's a quite
858 -- complex and slow test, and that's the reason we restrict it to
859 -- small cluster sizes.
860 prop_Text_CreateSerialise =
861 forAll genTags $ \ctags ->
862 forAll (choose (1, 20)) $ \maxiter ->
863 forAll (choose (2, 10)) $ \count ->
864 forAll genOnlineNode $ \node ->
865 forAll (genInstanceSmallerThanNode node) $ \inst ->
866 let nl = makeSmallCluster node count
867 reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
868 in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
869 Cluster.iterateAlloc nl Container.empty (Just maxiter) inst allocn [] []
871 Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
872 Types.Ok (_, _, _, [], _) -> printTestCase
873 "Failed to allocate: no allocations" False
874 Types.Ok (_, nl', il', _, _) ->
875 let cdata = Loader.ClusterData defGroupList nl' il' ctags
877 saved = Text.serializeCluster cdata
878 in case Text.parseData saved >>= Loader.mergeData [] [] [] [] of
879 Types.Bad msg -> failTest $ "Failed to load/merge: " ++ msg
880 Types.Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
881 ctags ==? ctags2 .&&.
882 Types.defIPolicy ==? cpol2 .&&.
884 defGroupList ==? gl2 .&&.
888 [ 'prop_Text_Load_Instance
889 , 'prop_Text_Load_InstanceFail
890 , 'prop_Text_Load_Node
891 , 'prop_Text_Load_NodeFail
892 , 'prop_Text_NodeLSIdempotent
893 , 'prop_Text_ISpecIdempotent
894 , 'prop_Text_IPolicyIdempotent
895 , 'prop_Text_CreateSerialise
900 -- | Generates a tuple of specs for simulation.
901 genSimuSpec :: Gen (String, Int, Int, Int, Int)
903 pol <- elements [C.allocPolicyPreferred,
904 C.allocPolicyLastResort, C.allocPolicyUnallocable,
906 -- should be reasonable (nodes/group), bigger values only complicate
907 -- the display of failed tests, and we don't care (in this particular
908 -- test) about big node groups
909 nodes <- choose (0, 20)
910 dsk <- choose (0, maxDsk)
911 mem <- choose (0, maxMem)
912 cpu <- choose (0, maxCpu)
913 return (pol, nodes, dsk, mem, cpu)
915 -- | Checks that given a set of corrects specs, we can load them
916 -- successfully, and that at high-level the values look right.
918 forAll (choose (0, 10)) $ \ngroups ->
919 forAll (replicateM ngroups genSimuSpec) $ \specs ->
920 let strspecs = map (\(p, n, d, m, c) -> printf "%s,%d,%d,%d,%d"
921 p n d m c::String) specs
922 totnodes = sum $ map (\(_, n, _, _, _) -> n) specs
923 mdc_in = concatMap (\(_, n, d, m, c) ->
924 replicate n (fromIntegral m, fromIntegral d,
926 fromIntegral m, fromIntegral d)) specs
927 in case Simu.parseData strspecs of
928 Types.Bad msg -> failTest $ "Failed to load specs: " ++ msg
929 Types.Ok (Loader.ClusterData gl nl il tags ipol) ->
930 let nodes = map snd $ IntMap.toAscList nl
931 nidx = map Node.idx nodes
932 mdc_out = map (\n -> (Node.tMem n, Node.tDsk n, Node.tCpu n,
933 Node.fMem n, Node.fDsk n)) nodes
935 Container.size gl ==? ngroups .&&.
936 Container.size nl ==? totnodes .&&.
937 Container.size il ==? 0 .&&.
938 length tags ==? 0 .&&.
939 ipol ==? Types.defIPolicy .&&.
940 nidx ==? [1..totnodes] .&&.
941 mdc_in ==? mdc_out .&&.
942 map Group.iPolicy (Container.elems gl) ==?
943 replicate ngroups Types.defIPolicy
-- | setAlias must update the alias while leaving the name untouched.
prop_Node_setAlias node name =
  Node.name aliased == Node.name node &&
  Node.alias aliased == name
  where _types = (node::Node.Node, name::String)
        aliased = Node.setAlias node name
-- | setOffline must be reflected by the offline getter.
prop_Node_setOffline node status =
  Node.offline newnode ==? status
  where newnode = Node.setOffline node status
-- | setXmem must be reflected by the xMem getter.
prop_Node_setXmem node xm =
  Node.xMem newnode ==? xm
  where newnode = Node.setXmem node xm
-- | setMcpu must be reflected in the node's policy vcpu ratio.
prop_Node_setMcpu node mc =
  Types.iPolicyVcpuRatio (Node.iPolicy newnode) ==? mc
  where newnode = Node.setMcpu node mc
969 -- | Check that an instance add with too high memory or disk will be
971 prop_Node_addPriFM node inst =
972 Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
973 not (Instance.isOffline inst) ==>
974 case Node.addPri node inst'' of
975 Types.OpFail Types.FailMem -> True
977 where _types = (node::Node.Node, inst::Instance.Instance)
978 inst' = setInstanceSmallerThanNode node inst
979 inst'' = inst' { Instance.mem = Instance.mem inst }
-- | Check that adding a primary instance with too high a disk
-- requirement fails with 'Types.FailDisk'.
prop_Node_addPriFD node inst =
  Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
  case Node.addPri node inst'' of
    Types.OpFail Types.FailDisk -> True
    -- NOTE(review): the catch-all alternative of this case appears to
    -- be missing from this chunk; confirm against the full file.
  where _types = (node::Node.Node, inst::Instance.Instance)
        -- shrink all resources below node capacity, then restore the
        -- original (over-large) disk so only disk can fail
        inst' = setInstanceSmallerThanNode node inst
        inst'' = inst' { Instance.dsk = Instance.dsk inst }
  -- NOTE(review): the defining equation of this property (and the
  -- maxCpu binding) precedes this chunk; the body below checks that
  -- an instance needing more VCPUs than available fails with FailCPU.
  forAll (choose (1, maxCpu)) $ \extra ->
  forAll genOnlineNode $ \node ->
  forAll (arbitrary `suchThat` Instance.notOffline) $ \inst ->
  let inst' = setInstanceSmallerThanNode node inst
      -- exceed the node's available cpus by a positive amount
      inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
  in case Node.addPri node inst'' of
       Types.OpFail Types.FailCPU -> property True
       v -> failTest $ "Expected OpFail FailCPU, but got " ++ show v
-- | Check that adding a secondary instance with too high memory or
-- disk will be rejected by 'Node.addSec'.
prop_Node_addSec node inst pdx =
  overloaded ==> isFailure (Node.addSec node inst pdx)
  where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
        mem_too_big = Instance.mem inst >= (Node.fMem node - Node.rMem node)
                      && not (Instance.isOffline inst)
        dsk_too_big = Instance.dsk inst >= Node.fDsk node
        overloaded = (mem_too_big || dsk_too_big) && not (Node.failN1 node)
-- | Check that an offline instance with reasonable disk size but
-- extra mem/cpu can always be added as a primary (offline instances
-- don't consume memory or cpu on the node).
prop_Node_addOfflinePri (NonNegative extra_mem) (NonNegative extra_cpu) =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
    let offline_inst =
          inst { Instance.runSt = Types.AdminOffline
               , Instance.mem = Node.availMem node + extra_mem
               , Instance.vcpus = Node.availCpu node + extra_cpu }
    in case Node.addPri node offline_inst of
         Types.OpGood _ -> property True
         v -> failTest $ "Expected OpGood, but got: " ++ show v
-- | Check that an offline instance with reasonable disk size but
-- extra mem/cpu can always be added as a DRBD secondary.
prop_Node_addOfflineSec (NonNegative extra_mem) (NonNegative extra_cpu) pdx =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
    let offline_inst =
          inst { Instance.runSt = Types.AdminOffline
               , Instance.mem = Node.availMem node + extra_mem
               , Instance.vcpus = Node.availCpu node + extra_cpu
               , Instance.diskTemplate = Types.DTDrbd8 }
    in case Node.addSec node offline_inst pdx of
         Types.OpGood _ -> property True
         v -> failTest $ "Expected OpGood/OpGood, but got: " ++ show v
-- | Checks for memory reservation changes: adding an auto-balanced
-- DRBD secondary must raise the node's reserved memory by exactly the
-- instance's memory, a non-auto-balanced one must not change it, and
-- removal must restore the original value in both cases.
prop_Node_rMem inst =
  not (Instance.isOffline inst) ==>
  forAll (genOnlineNode `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
  -- ab = auto_balance, nb = non-auto_balance
  -- we use -1 as the primary node of the instance
  let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True
                   , Instance.diskTemplate = Types.DTDrbd8 }
      inst_ab = setInstanceSmallerThanNode node inst'
      inst_nb = inst_ab { Instance.autoBalance = False }
      -- now we have the two instances, identical except the
      -- autoBalance attribute
      orig_rmem = Node.rMem node
      inst_idx = Instance.idx inst_ab
      node_add_ab = Node.addSec node inst_ab (-1)
      node_add_nb = Node.addSec node inst_nb (-1)
      -- removal is only attempted on the successfully-added nodes
      node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
      node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
  in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
       (Types.OpGood a_ab, Types.OpGood a_nb,
        Types.OpGood d_ab, Types.OpGood d_nb) ->
         printTestCase "Consistency checks failed" $
           Node.rMem a_ab > orig_rmem &&
           Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
           Node.rMem a_nb == orig_rmem &&
           Node.rMem d_ab == orig_rmem &&
           Node.rMem d_nb == orig_rmem &&
           -- this is not related to rMem, but as good a place to
           -- check membership of the node's secondary-instance list
           inst_idx `elem` Node.sList a_ab &&
           inst_idx `notElem` Node.sList d_ab
       x -> failTest $ "Failed to add/remove instances: " ++ show x
-- | Check mdsk setting: after changing the minimum-disk ratio, the
-- low-disk watermark and available-disk figures must stay consistent
-- with the node's total and free disk.
prop_Node_setMdsk node mx =
  Node.loDsk node' >= 0 &&
  fromIntegral (Node.loDsk node') <= Node.tDsk node &&
  Node.availDisk node' >= 0 &&
  Node.availDisk node' <= Node.fDsk node' &&
  fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
  Node.mDsk node' == mx'
  where _types = (node::Node.Node, mx::SmallRatio)
        node' = Node.setMdsk node mx'
        -- NOTE(review): the binding of mx' (unwrapping SmallRatio) is
        -- not visible in this chunk; confirm against the full file.
-- | Adding tags to an empty map and then deleting the same tags must
-- give back the empty map.
prop_Node_tagMaps_idempotent =
  forAll genTags $ \tags ->
    let with_tags = Node.addTags Data.Map.empty tags
    in Node.delTags with_tags tags ==? Data.Map.empty
-- | Every tag already present in the map must be rejected when added
-- again.
prop_Node_tagMaps_reject =
  forAll (genTags `suchThat` (not . null)) $ \tags ->
    let tagmap = Node.addTags Data.Map.empty tags
        rejected t = Node.rejectAddTags tagmap [t]
    in all rejected tags
-- | Every default field has a known header and renders to a known
-- value (never the "unknown field" marker).
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \field ->
    let header_known = fst (Node.showHeader field) /= Types.unknownField
        value_known = Node.showField node field /= Types.unknownField
    in header_known && value_known
-- | 'Node.computeGroups' partitions the nodes: sizes add up, each
-- group is homogeneous, group uuids are unique, and a non-empty input
-- yields a non-empty grouping.
prop_Node_computeGroups nodes =
  let grouped = Node.computeGroups nodes
      uuids = map fst grouped
      sizes_match = length nodes == sum [length ns | (_, ns) <- grouped]
      homogeneous =
        all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) grouped
      uuids_unique = length (nub uuids) == length uuids
      nonempty_ok = null nodes || not (null grouped)
  in sizes_match && homogeneous && uuids_unique && nonempty_ok
-- Check idempotence of add/remove operations
-- | Adding then removing a primary instance restores the node.
prop_Node_addPri_idempotent =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
    let add_result = Node.addPri node inst
    in case add_result of
         Types.OpGood node' -> Node.removePri node' inst ==? node
         _ -> failTest "Can't add instance"
-- | Adding then removing a DRBD secondary instance restores the node.
prop_Node_addSec_idempotent =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
    let pdx = Node.idx node + 1
        -- use a different node as primary and force DRBD so that a
        -- secondary placement is valid
        drbd_inst = (Instance.setPri inst pdx)
                    { Instance.diskTemplate = Types.DTDrbd8 }
    in case Node.addSec node drbd_inst pdx of
         Types.OpGood node' -> Node.removeSec node' drbd_inst ==? node
         _ -> failTest "Can't add instance"
  -- Template Haskell name-quote list registering the Node properties;
  -- the surrounding testSuite call and the closing bracket are
  -- outside this chunk, as are a couple of entries.
  [ 'prop_Node_setAlias
  , 'prop_Node_setOffline
  , 'prop_Node_setMcpu
  , 'prop_Node_setXmem
  , 'prop_Node_addPriFM
  , 'prop_Node_addPriFD
  , 'prop_Node_addPriFC
  , 'prop_Node_addOfflinePri
  , 'prop_Node_addOfflineSec
  , 'prop_Node_setMdsk
  , 'prop_Node_tagMaps_idempotent
  , 'prop_Node_tagMaps_reject
  , 'prop_Node_showField
  , 'prop_Node_computeGroups
  , 'prop_Node_addPri_idempotent
  , 'prop_Node_addSec_idempotent
-- | Check that the cluster score is close to zero for a homogeneous
-- cluster (identical nodes, no instances).
prop_Score_Zero node =
  forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
  let fn = Node.buildPeers node Container.empty
      nlst = replicate count fn
      score = Cluster.compCVNodes nlst
  -- we can't say == 0 here as the floating point errors accumulate;
  -- this should be much lower than the default score in CLI.hs
  -- NOTE(review): the final `in score <= <epsilon>` expression is not
  -- visible in this chunk; confirm against the full file.
-- | Check that cluster stats are sane: allocatable disk is
-- non-negative and never exceeds free disk.
-- NOTE(review): the defining equation of this property is not visible
-- in this chunk.
  forAll (choose (1, 1024)) $ \count ->
  forAll genOnlineNode $ \node ->
  let fn = Node.buildPeers node Container.empty
      nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
      nl = Container.fromList nlst
      cstats = Cluster.totalResources nl
  in Cluster.csAdsk cstats >= 0 &&
     Cluster.csAdsk cstats <= Cluster.csFdsk cstats
-- | Check that one instance is allocated correctly, without
-- rebalances needed (the resulting table admits no improving move).
prop_ClusterAlloc_sane inst =
  forAll (choose (5, 20)) $ \count ->
  forAll genOnlineNode $ \node ->
  let (nl, il, inst') = makeSmallEmptyCluster node count inst
      reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
  in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
     Cluster.tryAlloc nl il inst' of
       Types.Bad _ -> False
       -- NOTE(review): the `Types.Ok as ->` and `Nothing -> False`
       -- alternatives appear truncated in this chunk.
       case Cluster.asSolution as of
         Just (xnl, xi, _, cv) ->
           let il' = Container.add (Instance.idx xi) xi il
               tbl = Cluster.Table xnl il' cv []
           -- after a successful allocation no rebalance move should help
           in not (canBalance tbl True True False)
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes. Furthermore, we test that
-- computed allocation statistics are correct.
prop_ClusterCanTieredAlloc inst =
  forAll (choose (2, 5)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  let nl = makeSmallCluster node count
      il = Container.empty
      rqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
      allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
  in case allocnodes >>= \allocnodes' ->
     Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
       Types.Bad msg -> failTest $ "Failed to tiered alloc: " ++ msg
       Types.Ok (_, nl', il', ixes, cstats) ->
         let (ai_alloc, ai_pool, ai_unav) =
               Cluster.computeAllocationDelta
                 (Cluster.totalResources nl)
                 (Cluster.totalResources nl')
             all_nodes = Container.elems nl
         -- at least one instance must be placed, the instance map and
         -- stats lists must agree, and each per-resource allocation
         -- breakdown must sum back to the cluster totals
         in property (not (null ixes)) .&&.
            IntMap.size il' ==? length ixes .&&.
            length ixes ==? length cstats .&&.
            sum (map Types.allocInfoVCpus [ai_alloc, ai_pool, ai_unav]) ==?
              sum (map Node.hiCpu all_nodes) .&&.
            sum (map Types.allocInfoNCpus [ai_alloc, ai_pool, ai_unav]) ==?
              sum (map Node.tCpu all_nodes) .&&.
            sum (map Types.allocInfoMem [ai_alloc, ai_pool, ai_unav]) ==?
              truncate (sum (map Node.tMem all_nodes)) .&&.
            sum (map Types.allocInfoDisk [ai_alloc, ai_pool, ai_unav]) ==?
              truncate (sum (map Node.tDsk all_nodes))
-- | Helper function to create a cluster with the given range of nodes
-- and allocate an instance on it; returns the node list, the
-- single-instance list and the placed instance, or a failure message.
genClusterAlloc count node inst =
  let nl = makeSmallCluster node count
      reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
  in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
     Cluster.tryAlloc nl Container.empty inst of
       Types.Bad _ -> Types.Bad "Can't allocate"
       -- NOTE(review): the `Types.Ok as ->` alternative appears
       -- truncated in this chunk.
       case Cluster.asSolution as of
         Nothing -> Types.Bad "Empty solution?"
         Just (xnl, xi, _, _) ->
           let xil = Container.add (Instance.idx xi) xi Container.empty
           in Types.Ok (xnl, xil, xi)
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also relocate it (away from the secondary for DRBD,
-- otherwise away from the primary).
prop_ClusterAllocRelocate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      case IAlloc.processRelocate defGroupList nl il
             (Instance.idx inst) 1
             [(if Instance.diskTemplate inst' == Types.DTDrbd8
               -- NOTE(review): the `then Instance.sNode`-style branch
               -- appears truncated in this chunk.
               else Instance.pNode) inst'] of
        Types.Ok _ -> property True
        Types.Bad msg -> failTest $ "Failed to relocate: " ++ msg
-- | Helper property checker for the result of a nodeEvac or
-- changeGroup operation: no failed instances, opcodes were produced,
-- and exactly our instance moved into the expected group.
check_EvacMode grp inst result =
  -- NOTE(review): the `case result of` scrutinee line and two nested
  -- case lines appear truncated in this chunk; confirm against the
  -- full file.
    Types.Bad msg -> failTest $ "Couldn't evacuate/change group:" ++ msg
    Types.Ok (_, _, es) ->
      let moved = Cluster.esMoved es
          failed = Cluster.esFailed es
          opcodes = not . null $ Cluster.esOpCodes es
      in failmsg ("'failed' not empty: " ++ show failed) (null failed) .&&.
         failmsg "'opcodes' is null" opcodes .&&.
         [(idx', gdx, _)] -> failmsg "invalid instance moved" (idx == idx')
                             failmsg "wrong target group"
                             (gdx == Group.idx grp)
         v -> failmsg ("invalid solution: " ++ show v) False
  where failmsg = \msg -> printTestCase ("Failed to evacuate: " ++ msg)
        idx = Instance.idx inst
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can node-evacuate it under every evacuation mode valid for its
-- mirror type.
prop_ClusterAllocEvacuate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
    case genClusterAlloc count node inst of
      Types.Bad msg -> failTest msg
      Types.Ok (nl, il, inst') ->
        let modes = evacModeOptions . Types.templateMirrorType $
                    Instance.diskTemplate inst'
            evacOne mode =
              check_EvacMode defGroup inst' $
              Cluster.tryNodeEvac defGroupList nl il mode
                [Instance.idx inst']
        in conjoin (map evacOne modes)
-- | Checks that on a 4-8 node cluster with two node groups, once we
-- allocate an instance on the first node group, we can also change
-- its group.
prop_ClusterAllocChangeGroup =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- we need to add a second node group and nodes to the cluster
      let nl2 = Container.elems $ makeSmallCluster node count
          grp2 = Group.setIdx defGroup (Group.idx defGroup + 1)
          maxndx = maximum . map Node.idx $ nl2
          -- shift the new nodes' indices past the existing ones so
          -- the two node containers don't collide when unioned
          nl3 = map (\n -> n { Node.group = Group.idx grp2
                             , Node.idx = Node.idx n + maxndx }) nl2
          nl4 = Container.fromList . map (\n -> (Node.idx n, n)) $ nl3
          gl' = Container.add (Group.idx grp2) grp2 defGroupList
          nl' = IntMap.union nl nl4
      in check_EvacMode grp2 inst' $
         Cluster.tryChangeGroup gl' nl' il [] [Instance.idx inst']
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
  forAll (genNode (Just 5) (Just 128)) $ \node ->
  forAll (choose (3, 5)) $ \count ->
  not (Node.offline node) && not (Node.failN1 node) ==>
  let nl = makeSmallCluster node count
      -- hold back the highest-index node; it is re-added empty after
      -- the allocations, creating the imbalance to correct
      (hnode, nl') = IntMap.deleteFindMax nl
      il = Container.empty
      allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
      i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
  in case allocnodes >>= \allocnodes' ->
     Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
       Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
       Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no instances"
       Types.Ok (_, xnl, il', _, _) ->
         let ynl = Container.add (Node.idx hnode) hnode xnl
             cv = Cluster.compCV ynl
             tbl = Cluster.Table ynl il' cv []
         in printTestCase "Failed to rebalance" $
            canBalance tbl True True False
-- | Checks consistency: instances whose primary and secondary live in
-- different node groups are flagged as split, others are not.
prop_ClusterCheckConsistency node inst =
  let base_nl = makeSmallCluster node 3
      [n1, n2, n3] = Container.elems base_nl
      moved_n3 = n3 { Node.group = 1 }
      nl = Container.add (Node.idx moved_n3) moved_n3 base_nl
      split_of i = Cluster.findSplitInstances nl (Container.fromList [(0, i)])
      same_group = Instance.setBoth inst (Node.idx n1) (Node.idx n2)
      no_secondary = Instance.setBoth inst (Node.idx n1) Node.noSecondary
      cross_group = Instance.setBoth inst (Node.idx n1) (Node.idx n3)
  in null (split_of same_group) &&
     null (split_of no_secondary) &&
     not (null (split_of cross_group))
-- | For now, we only test that we don't lose instances during the
-- split, and that each resulting sub-cluster is group-homogeneous.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
    let addOne (ns, is) _ = assignInstance ns is inst 0 1
        (nl', il') = foldl addOne (makeSmallCluster node 2, Container.empty)
                     [1..icnt]
        pieces = Cluster.splitCluster nl' il'
        inst_total = sum [Container.size il'' | (_, (_, il'')) <- pieces]
        nodes_match (guuid, (nl'', _)) =
          all ((== guuid) . Node.group) (Container.elems nl'')
    in inst_total == icnt && all nodes_match pieces
-- | Helper function to check if we can allocate an instance on a
-- given node list with the given number of required nodes.
canAllocOn :: Node.List -> Int -> Instance.Instance -> Bool
canAllocOn nl reqnodes inst =
  case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
       Cluster.tryAlloc nl (Container.empty) inst of
    Types.Bad _ -> False
    -- NOTE(review): the `Types.Ok as ->` alternative and the final
    -- Nothing/Just branches are truncated in this chunk.
    case Cluster.asSolution as of
-- | Checks that allocation obeys minimum and maximum instance
-- policies. The unittest generates a random node, duplicates it count
-- times, and generates a random instance that can be allocated on
-- this mini-cluster; it then checks that after applying a policy that
-- the instance doesn't fit, the allocation fails.
prop_ClusterAllocPolicy node =
  -- rqn is the required nodes (1 or 2)
  forAll (choose (1, 2)) $ \rqn ->
  forAll (choose (5, 20)) $ \count ->
  forAll (arbitrary `suchThat` (canAllocOn (makeSmallCluster node count) rqn))
  -- NOTE(review): the `$ \inst ->` continuation appears truncated in
  -- this chunk.
  forAll (arbitrary `suchThat` (isFailure .
                                Instance.instMatchesPolicy inst)) $ \ipol ->
  let node' = Node.setPolicy ipol node
      nl = makeSmallCluster node' count
  in not $ canAllocOn nl rqn inst
  -- Registrations of the Cluster properties; the testSuite call, the
  -- first entries and the closing bracket are outside this chunk.
  , 'prop_ClusterAlloc_sane
  , 'prop_ClusterCanTieredAlloc
  , 'prop_ClusterAllocRelocate
  , 'prop_ClusterAllocEvacuate
  , 'prop_ClusterAllocChangeGroup
  , 'prop_ClusterAllocBalance
  , 'prop_ClusterCheckConsistency
  , 'prop_ClusterSplitCluster
  , 'prop_ClusterAllocPolicy
-- | Check that opcode serialization is idempotent: showJSON followed
-- by readJSON yields the original opcode.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Ok decoded -> op ==? decoded
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
  where _types = op::OpCodes.OpCode
  -- Test-suite registration for the OpCodes properties; the testSuite
  -- call itself is outside this chunk.
  [ 'prop_OpCodes_serialization ]
-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Ok decoded -> os ==? decoded
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
  where _types = os::Jobs.OpStatus
-- | Check that job status serialization round-trips.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Ok decoded -> js ==? decoded
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
  where _types = js::Jobs.JobStatus
  -- Test-suite registration for the job/opcode status properties; the
  -- testSuite call and closing bracket are outside this chunk.
  [ 'prop_OpStatus_serialization
  , 'prop_JobStatus_serialization
-- | 'Loader.lookupNode' behaves like a plain map lookup on the
-- name-to-node association list.
prop_Loader_lookupNode ktn inst node =
  let nl = Data.Map.fromList ktn
  in Loader.lookupNode nl inst node ==? Data.Map.lookup node nl
-- | 'Loader.lookupInstance' behaves like a plain map lookup on the
-- name-to-instance association list.
prop_Loader_lookupInstance kti inst =
  let il = Data.Map.fromList kti
  in Loader.lookupInstance il inst ==? Data.Map.lookup inst il
-- | Checks that assigning indices to uniquely-named nodes produces a
-- dense 0..n-1 indexing and containers of matching size.
prop_Loader_assignIndices =
  -- generate nodes with unique names
  forAll (arbitrary `suchThat`
          -- NOTE(review): the lambda head binding `nodes` appears
          -- truncated in this chunk.
          let names = map Node.name nodes
          in length names == length (nub names))) $ \nodes ->
  -- NOTE(review): the `let (nassoc, kt) =` line appears truncated.
        Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
  in Data.Map.size nassoc == length nodes &&
     Container.size kt == length nodes &&
     -- NOTE(review): the `if not (null nodes)` guard and the `else`
     -- branch are truncated in this chunk.
     then maximum (IntMap.keys kt) == length nodes - 1
-- | Checks that the number of primary instances recorded on the nodes
-- is zero after merging an instance-free cluster.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] []
          (Loader.emptyCluster {Loader.cdNodes = na}) of
       Types.Bad _ -> False
       Types.Ok (Loader.ClusterData _ nl il _ _) ->
         let nodes = Container.elems nl
             instances = Container.elems il
         in (sum . map (length . Node.pList)) nodes == 0 &&
            -- NOTE(review): the trailing conjunct (presumably over
            -- `instances`) is truncated in this chunk.
-- | Check that compareNameComponent on equal strings works: equal
-- input must yield an exact match carrying the same string.
prop_Loader_compareNameComponent_equal :: String -> Bool
prop_Loader_compareNameComponent_equal s =
  let expected = Loader.LookupResult Loader.ExactMatch s
  in Loader.compareNameComponent s s == expected
-- | Check that compareNameComponent on prefix strings works: a
-- dot-separated prefix of a longer name yields a partial match.
prop_Loader_compareNameComponent_prefix :: NonEmptyList Char -> String -> Bool
prop_Loader_compareNameComponent_prefix (NonEmpty s1) s2 =
  let fqdn = s1 ++ "." ++ s2
  in Loader.compareNameComponent fqdn s1 ==
     Loader.LookupResult Loader.PartialMatch s1
  -- Test-suite registration for the Loader properties; the testSuite
  -- call and closing bracket are outside this chunk.
  [ 'prop_Loader_lookupNode
  , 'prop_Loader_lookupInstance
  , 'prop_Loader_assignIndices
  , 'prop_Loader_mergeData
  , 'prop_Loader_compareNameComponent_equal
  , 'prop_Loader_compareNameComponent_prefix
-- | JSON round-trip for 'Types.AllocPolicy'.
prop_Types_AllocPolicy_serialisation apol =
  case J.readJSON (J.showJSON apol) of
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
    J.Ok p -> p ==? apol
  where _types = apol::Types.AllocPolicy
-- | JSON round-trip for 'Types.DiskTemplate'.
prop_Types_DiskTemplate_serialisation dt =
  case J.readJSON (J.showJSON dt) of
    -- NOTE(review): the `J.Ok` alternative appears truncated in this
    -- chunk.
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = dt::Types.DiskTemplate
-- | JSON round-trip for 'Types.ISpec'.
prop_Types_ISpec_serialisation ispec =
  case J.readJSON (J.showJSON ispec) of
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
    J.Ok p -> p ==? ispec
  where _types = ispec::Types.ISpec
-- | JSON round-trip for 'Types.IPolicy'.
prop_Types_IPolicy_serialisation ipol =
  case J.readJSON (J.showJSON ipol) of
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
    J.Ok p -> p ==? ipol
  where _types = ipol::Types.IPolicy
-- | JSON round-trip for 'Types.EvacMode'.
prop_Types_EvacMode_serialisation em =
  case J.readJSON (J.showJSON em) of
    -- NOTE(review): the `J.Ok` alternative appears truncated in this
    -- chunk.
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = em::Types.EvacMode
-- | 'Types.opToResult' maps OpFail to Bad and OpGood to Ok carrying
-- the same payload.
prop_Types_opToResult op =
  -- NOTE(review): the `case op of` scrutinee line appears truncated
  -- in this chunk.
    Types.OpFail _ -> Types.isBad r
    Types.OpGood v -> case r of
                        Types.Bad _ -> False
                        Types.Ok v' -> v == v'
  where r = Types.opToResult op
        _types = op::Types.OpResult Int
-- | 'Types.eitherToResult' maps Left to Bad and Right to Ok carrying
-- the same payload.
prop_Types_eitherToResult ei =
  -- NOTE(review): the `case ei of` scrutinee line appears truncated
  -- in this chunk.
    Left _ -> Types.isBad r
    Right v -> case r of
                 Types.Bad _ -> False
                 Types.Ok v' -> v == v'
  where r = Types.eitherToResult ei
        _types = ei::Either String Int
  -- Test-suite registration for the Types properties; the testSuite
  -- call and closing bracket are outside this chunk.
  [ 'prop_Types_AllocPolicy_serialisation
  , 'prop_Types_DiskTemplate_serialisation
  , 'prop_Types_ISpec_serialisation
  , 'prop_Types_IPolicy_serialisation
  , 'prop_Types_EvacMode_serialisation
  , 'prop_Types_opToResult
  , 'prop_Types_eitherToResult
-- | Test correct parsing: a "disk,mem,cpu" triple must parse into the
-- corresponding RSpec.
prop_CLI_parseISpec descr dsk mem cpu =
  let spec_str = printf "%d,%d,%d" dsk mem cpu
      expected = Types.Ok (Types.RSpec cpu mem dsk)
  in CLI.parseISpecString descr spec_str ==? expected
-- | Test parsing failure due to wrong section count (any number of
-- comma-separated fields other than three must be rejected).
prop_CLI_parseISpecFail descr =
  forAll (choose (0,100) `suchThat` ((/=) 3)) $ \nelems ->
  forAll (replicateM nelems arbitrary) $ \values ->
  let str = intercalate "," $ map show (values::[Int])
  in case CLI.parseISpecString descr str of
       Types.Ok v -> failTest $ "Expected failure, got " ++ show v
       -- NOTE(review): the success (Types.Bad) alternative appears
       -- truncated in this chunk.
-- | Test parseYesNo: with no value the default is returned; "yes"/"no"
-- parse to True/False; any other value is a parse failure.
prop_CLI_parseYesNo def testval val =
  forAll (elements [val, "yes", "no"]) $ \actual_val ->
  -- NOTE(review): the `if testval` guard line appears truncated in
  -- this chunk.
  then CLI.parseYesNo def Nothing ==? Types.Ok def
  else let result = CLI.parseYesNo def (Just actual_val)
       in if actual_val `elem` ["yes", "no"]
            then result ==? Types.Ok (actual_val == "yes")
            else property $ Types.isBad result
-- | Helper to check for correct parsing of string arg: passing
-- "--<long-option>=<val>" must make the accessor return Just val.
checkStringArg val (opt, fn) =
  let GetOpt.Option _ longs _ _ = opt
  -- NOTE(review): the `case longs of` scrutinee and the cons-pattern
  -- alternative appear truncated in this chunk.
     [] -> failTest "no long options?"
       case CLI.parseOptsInner ["--" ++ cmdarg ++ "=" ++ val] "prog" [opt] of
         Left e -> failTest $ "Failed to parse option: " ++ show e
         Right (options, _) -> fn options ==? Just val
-- | Test a few string arguments: each option/accessor pair must
-- round-trip an arbitrary string value through the parser.
prop_CLI_StringArg argument =
  let args = [ (CLI.oDataFile, CLI.optDataFile)
             , (CLI.oDynuFile, CLI.optDynuFile)
             , (CLI.oSaveCluster, CLI.optSaveCluster)
             , (CLI.oReplay, CLI.optReplay)
             , (CLI.oPrintCommands, CLI.optShowCmds)
             , (CLI.oLuxiSocket, CLI.optLuxi)
             -- NOTE(review): the list's closing bracket appears
             -- truncated in this chunk.
  in conjoin $ map (checkStringArg argument) args
-- | Helper to test that a given option is accepted OK with quick
-- exit: parsing must stop early with exit code 0.
checkEarlyExit name options param =
  case CLI.parseOptsInner [param] name options of
    Left (code, _) -> if code == 0
                        -- NOTE(review): the `then ...` branch appears
                        -- truncated in this chunk.
                        else failTest $ "Program " ++ name ++
                             " returns invalid code " ++ show code ++
                             " for option " ++ param
    _ -> failTest $ "Program " ++ name ++ " doesn't consider option " ++
         param ++ " as early exit one"
-- | Test that all binaries support some common options. There is
-- nothing actually random about this test...
-- NOTE(review): the defining equation of this property appears
-- truncated in this chunk.
  let params = ["-h", "--help", "-V", "--version"]
      opts = map (\(name, (_, o)) -> (name, o)) Program.personalities
  -- apply checkEarlyExit across the cartesian product of params and opts
  in conjoin [checkEarlyExit n o p | p <- params, (n, o) <- opts]
1615 [ 'prop_CLI_parseISpec
1616 , 'prop_CLI_parseISpecFail
1617 , 'prop_CLI_parseYesNo
1618 , 'prop_CLI_StringArg