1 {-# LANGUAGE TemplateHaskell #-}
3 {-| Unittests for ganeti-htools.
9 Copyright (C) 2009, 2010, 2011, 2012 Google Inc.
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2 of the License, or
14 (at your option) any later version.
16 This program is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
28 module Ganeti.HTools.QC
47 import Test.QuickCheck
48 import Text.Printf (printf)
49 import Data.List (findIndex, intercalate, nub, isPrefixOf)
50 import qualified Data.Set as Set
53 import Control.Applicative
54 import qualified System.Console.GetOpt as GetOpt
55 import qualified Text.JSON as J
56 import qualified Data.Map
57 import qualified Data.IntMap as IntMap
59 import qualified Ganeti.OpCodes as OpCodes
60 import qualified Ganeti.Jobs as Jobs
61 import qualified Ganeti.Luxi as Luxi
62 import qualified Ganeti.Ssconf as Ssconf
63 import qualified Ganeti.HTools.CLI as CLI
64 import qualified Ganeti.HTools.Cluster as Cluster
65 import qualified Ganeti.HTools.Container as Container
66 import qualified Ganeti.HTools.ExtLoader
67 import qualified Ganeti.HTools.IAlloc as IAlloc
68 import qualified Ganeti.HTools.Instance as Instance
69 import qualified Ganeti.HTools.JSON as JSON
70 import qualified Ganeti.HTools.Loader as Loader
71 import qualified Ganeti.HTools.Luxi as HTools.Luxi
72 import qualified Ganeti.HTools.Node as Node
73 import qualified Ganeti.HTools.Group as Group
74 import qualified Ganeti.HTools.PeerMap as PeerMap
75 import qualified Ganeti.HTools.Rapi
76 import qualified Ganeti.HTools.Simu as Simu
77 import qualified Ganeti.HTools.Text as Text
78 import qualified Ganeti.HTools.Types as Types
79 import qualified Ganeti.HTools.Utils as Utils
80 import qualified Ganeti.HTools.Version
81 import qualified Ganeti.Constants as C
83 import qualified Ganeti.HTools.Program as Program
84 import qualified Ganeti.HTools.Program.Hail
85 import qualified Ganeti.HTools.Program.Hbal
86 import qualified Ganeti.HTools.Program.Hscan
87 import qualified Ganeti.HTools.Program.Hspace
89 import Ganeti.HTools.QCHelper (testSuite)
93 -- | Maximum memory (1TiB, somewhat random value).
-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8
101 -- | Max CPUs (1024, somewhat random value).
-- | Maximum vcpu ratio used by the generators (arbitrary, but high
-- enough to never constrain the generated clusters).
maxVcpuRatio :: Double
maxVcpuRatio = 1024
-- | Maximum spindle ratio used by the generators (arbitrary value).
maxSpindleRatio :: Double
maxSpindleRatio = 1024
113 -- | Max nodes, used just to limit arbitrary instances for smaller
114 -- opcode definitions (e.g. list of nodes in OpTestDelay).
118 -- | Max opcodes or jobs in a submit job and submit many jobs.
-- | The complete list of disk templates (canonical list, reused by
-- later generators and properties).
allDiskTemplates :: [Types.DiskTemplate]
allDiskTemplates = enumFromTo minBound maxBound
126 -- | Null iPolicy, and by null we mean very liberal.
127 nullIPolicy = Types.IPolicy
128 { Types.iPolicyMinSpec = Types.ISpec { Types.iSpecMemorySize = 0
129 , Types.iSpecCpuCount = 0
130 , Types.iSpecDiskSize = 0
131 , Types.iSpecDiskCount = 0
132 , Types.iSpecNicCount = 0
133 , Types.iSpecSpindleUse = 0
135 , Types.iPolicyMaxSpec = Types.ISpec { Types.iSpecMemorySize = maxBound
136 , Types.iSpecCpuCount = maxBound
137 , Types.iSpecDiskSize = maxBound
138 , Types.iSpecDiskCount = C.maxDisks
139 , Types.iSpecNicCount = C.maxNics
140 , Types.iSpecSpindleUse = maxBound
142 , Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
143 , Types.iSpecCpuCount = Types.unitCpu
144 , Types.iSpecDiskSize = Types.unitDsk
145 , Types.iSpecDiskCount = 1
146 , Types.iSpecNicCount = 1
147 , Types.iSpecSpindleUse = 1
149 , Types.iPolicyDiskTemplates = [minBound..maxBound]
150 , Types.iPolicyVcpuRatio = maxVcpuRatio -- somewhat random value, high
151 -- enough to not impact us
152 , Types.iPolicySpindleRatio = maxSpindleRatio
156 defGroup :: Group.Group
157 defGroup = flip Group.setIdx 0 $
158 Group.create "default" Types.defaultGroupID Types.AllocPreferred
-- | The default group wrapped in a (singleton) group list fixture.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]
-- | Association from the default group's UUID to its index (the
-- shape expected by the text loader helpers).
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.fromList [(Group.uuid defGroup, Group.idx defGroup)]
167 -- * Helper functions
169 -- | Simple checker for whether OpResult is fail or pass.
170 isFailure :: Types.OpResult a -> Bool
171 isFailure (Types.OpFail _) = True
-- | Equality check that, on failure, annotates the result with both
-- operands for easier debugging.
(==?) :: (Show a, Eq a) => a -> a -> Property
(==?) x y = printTestCase msg (x == y)
  where msg = concat [ "Expected equality, but '", show x
                     , "' /= '", show y, "'" ]
-- | Fails the current test, showing the given message.
failTest :: String -> Property
failTest = flip printTestCase False
185 -- | Update an instance to be smaller than a node.
186 setInstanceSmallerThanNode node inst =
187 inst { Instance.mem = Node.availMem node `div` 2
188 , Instance.dsk = Node.availDisk node `div` 2
189 , Instance.vcpus = Node.availCpu node `div` 2
192 -- | Create an instance given its spec.
193 createInstance mem dsk vcpus =
194 Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
197 -- | Create a small cluster by repeating a node spec.
198 makeSmallCluster :: Node.Node -> Int -> Node.List
199 makeSmallCluster node count =
200 let origname = Node.name node
201 origalias = Node.alias node
202 nodes = map (\idx -> node { Node.name = origname ++ "-" ++ show idx
203 , Node.alias = origalias ++ "-" ++ show idx })
205 fn = flip Node.buildPeers Container.empty
206 namelst = map (\n -> (Node.name n, fn n)) nodes
207 (_, nlst) = Loader.assignIndices namelst
-- | Builds a small cluster fixture: a node list from the given node
-- spec, an empty instance list, and an instance shrunk to fit the node.
makeSmallEmptyCluster :: Node.Node -> Int -> Instance.Instance
                      -> (Node.List, Instance.List, Instance.Instance)
makeSmallEmptyCluster node count inst =
  let nl = makeSmallCluster node count
      fitted = setInstanceSmallerThanNode node inst
  in (nl, Container.empty, fitted)
-- | Checks if a node is "big" enough, i.e. has more than @size@
-- standard units of free disk, memory and cpu.
isNodeBig :: Int -> Node.Node -> Bool
isNodeBig size node =
  and [ Node.availDisk node > size * Types.unitDsk
      , Node.availMem node > size * Types.unitMem
      , Node.availCpu node > size * Types.unitCpu
      ]
-- | Whether balancing can still improve the given cluster table:
-- True iff 'Cluster.tryBalance' yields a new solution ('isJust').
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0
226 -- | Assigns a new fresh instance to a cluster; this is not
227 -- allocation, so no resource checks are done.
228 assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
229 Types.Idx -> Types.Idx ->
230 (Node.List, Instance.List)
231 assignInstance nl il inst pdx sdx =
232 let pnode = Container.find pdx nl
233 snode = Container.find sdx nl
234 maxiidx = if Container.null il
236 else fst (Container.findMax il) + 1
237 inst' = inst { Instance.idx = maxiidx,
238 Instance.pNode = pdx, Instance.sNode = sdx }
239 pnode' = Node.setPri pnode inst'
240 snode' = Node.setSec snode inst'
241 nl' = Container.addTwo pdx pnode' sdx snode' nl
242 il' = Container.add maxiidx inst' il
245 -- | Generates a list of a given size with non-duplicate elements.
246 genUniquesList :: (Eq a, Arbitrary a) => Int -> Gen [a]
249 newelem <- arbitrary `suchThat` (`notElem` lst)
250 return (newelem:lst)) [] [1..cnt]
-- | Checks if an instance is mirrored (its mirror type is anything
-- other than 'Types.MirrorNone').
isMirrored :: Instance.Instance -> Bool
isMirrored inst = Instance.mirrorType inst /= Types.MirrorNone
-- | Returns the possible change node types for a disk template.
evacModeOptions :: Types.MirrorType -> [Types.EvacMode]
evacModeOptions mt =
  case mt of
    Types.MirrorNone     -> []
    Types.MirrorInternal -> [minBound..maxBound] -- DRBD can do all
    Types.MirrorExternal -> [Types.ChangePrimary, Types.ChangeAll]
262 -- * Arbitrary instances
-- | A single character for building DNS names; the 'Arbitrary'
-- instance below restricts it to a DNS-safe character set.
newtype DNSChar = DNSChar { dnsGetChar::Char }
267 instance Arbitrary DNSChar where
269 x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
272 -- | Generates a single name component.
273 getName :: Gen String
277 return (map dnsGetChar dn)
279 -- | Generates an entire FQDN.
280 getFQDN :: Gen String
282 ncomps <- choose (1, 4)
283 names <- vectorOf ncomps getName
284 return $ intercalate "." names
286 -- | Combinator that generates a 'Maybe' using a sub-combinator.
287 getMaybe :: Gen a -> Gen (Maybe a)
294 -- | Generates a fields list. This uses the same character set as a
295 -- DNS name (just for simplicity).
296 getFields :: Gen [String]
-- | A single character valid inside a tag (wrapper used for
-- Arbitrary generation of tags).
newtype TagChar = TagChar { tagGetChar :: Char }
304 -- | All valid tag chars. This doesn't need to match _exactly_
305 -- Ganeti's own tag regex, just enough for it to be close.
307 tagChar = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] ++ ".+*/:@-"
309 instance Arbitrary TagChar where
311 c <- elements tagChar
315 genTag :: Gen [TagChar]
317 -- the correct value would be C.maxTagLen, but that's way too
318 -- verbose in unittests, and at the moment I don't see any possible
319 -- bugs with longer tags and the way we use tags in htools
323 -- | Generates a list of tags (correctly upper bounded).
324 genTags :: Gen [String]
326 -- the correct value would be C.maxTagsPerObj, but per the comment
327 -- in genTag, we don't use tags enough in htools to warrant testing
329 n <- choose (0, 10::Int)
330 tags <- mapM (const genTag) [1..n]
331 return $ map (map tagGetChar) tags
-- | Arbitrary instance status, drawn uniformly from all constructors.
instance Arbitrary Types.InstanceStatus where
  arbitrary = elements [minBound..maxBound]
336 -- | Generates a random instance with maximum disk/mem/cpu values.
337 genInstanceSmallerThan :: Int -> Int -> Int -> Gen Instance.Instance
338 genInstanceSmallerThan lim_mem lim_dsk lim_cpu = do
340 mem <- choose (0, lim_mem)
341 dsk <- choose (0, lim_dsk)
345 vcpus <- choose (0, lim_cpu)
347 return $ Instance.create name mem dsk vcpus run_st [] True pn sn dt 1
-- | Generates an instance that fits on the given node (each resource
-- bounded by half of the node's available amount).
genInstanceSmallerThanNode :: Node.Node -> Gen Instance.Instance
genInstanceSmallerThanNode node =
  genInstanceSmallerThan (half Node.availMem)
                         (half Node.availDisk)
                         (half Node.availCpu)
  where half f = f node `div` 2
-- | Arbitrary instance, bounded by the global maximum mem/disk/cpu
-- constants.
instance Arbitrary Instance.Instance where
  arbitrary = genInstanceSmallerThan maxMem maxDsk maxCpu
-- | Generates an arbitrary node based on sizing information.
361 genNode :: Maybe Int -- ^ Minimum node size in terms of units
362 -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
363 -- just by the max... constants)
365 genNode min_multiplier max_multiplier = do
366 let (base_mem, base_dsk, base_cpu) =
367 case min_multiplier of
368 Just mm -> (mm * Types.unitMem,
372 (top_mem, top_dsk, top_cpu) =
373 case max_multiplier of
374 Just mm -> (mm * Types.unitMem,
377 Nothing -> (maxMem, maxDsk, maxCpu)
379 mem_t <- choose (base_mem, top_mem)
380 mem_f <- choose (base_mem, mem_t)
381 mem_n <- choose (0, mem_t - mem_f)
382 dsk_t <- choose (base_dsk, top_dsk)
383 dsk_f <- choose (base_dsk, dsk_t)
384 cpu_t <- choose (base_cpu, top_cpu)
386 let n = Node.create name (fromIntegral mem_t) mem_n mem_f
387 (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 1 0
388 n' = Node.setPolicy nullIPolicy n
389 return $ Node.buildPeers n' Container.empty
391 -- | Helper function to generate a sane node.
392 genOnlineNode :: Gen Node.Node
394 arbitrary `suchThat` (\n -> not (Node.offline n) &&
395 not (Node.failN1 n) &&
396 Node.availDisk n > 0 &&
397 Node.availMem n > 0 &&
-- | Arbitrary node, with no extra size bounds (only the global maxima
-- applied by 'genNode').
instance Arbitrary Node.Node where
  arbitrary = genNode Nothing Nothing
-- | Arbitrary replace-disks mode, drawn from all constructors.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [minBound..maxBound]
408 instance Arbitrary OpCodes.OpCode where
410 op_id <- elements [ "OP_TEST_DELAY"
411 , "OP_INSTANCE_REPLACE_DISKS"
412 , "OP_INSTANCE_FAILOVER"
413 , "OP_INSTANCE_MIGRATE"
417 OpCodes.OpTestDelay <$> arbitrary <*> arbitrary
418 <*> resize maxNodes (listOf getFQDN)
419 "OP_INSTANCE_REPLACE_DISKS" ->
420 OpCodes.OpInstanceReplaceDisks <$> getFQDN <*> getMaybe getFQDN <*>
421 arbitrary <*> resize C.maxDisks arbitrary <*> getMaybe getName
422 "OP_INSTANCE_FAILOVER" ->
423 OpCodes.OpInstanceFailover <$> getFQDN <*> arbitrary <*>
425 "OP_INSTANCE_MIGRATE" ->
426 OpCodes.OpInstanceMigrate <$> getFQDN <*> arbitrary <*>
427 arbitrary <*> arbitrary <*> getMaybe getFQDN
428 _ -> fail "Wrong opcode"
-- | Arbitrary opcode status, drawn from all constructors.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]
-- | Arbitrary job status, drawn from all constructors.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]
-- | Wrapper over a 'Double' used to generate small ratio values.
newtype SmallRatio = SmallRatio Double deriving Show
437 instance Arbitrary SmallRatio where
440 return $ SmallRatio v
-- | Arbitrary allocation policy, drawn from all constructors.
instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]
-- | Arbitrary disk template, drawn from 'allDiskTemplates' so this
-- instance stays consistent with the canonical list defined earlier
-- in the file (instead of duplicating the @[minBound..maxBound]@
-- enumeration).
instance Arbitrary Types.DiskTemplate where
  arbitrary = elements allDiskTemplates
-- | Arbitrary failure mode, drawn from all constructors.
instance Arbitrary Types.FailMode where
  arbitrary = elements [minBound..maxBound]
-- | Arbitrary evacuation mode, drawn from all constructors.
instance Arbitrary Types.EvacMode where
  arbitrary = elements [minBound..maxBound]
454 instance Arbitrary a => Arbitrary (Types.OpResult a) where
455 arbitrary = arbitrary >>= \c ->
457 then Types.OpGood <$> arbitrary
458 else Types.OpFail <$> arbitrary
460 instance Arbitrary Types.ISpec where
462 mem_s <- arbitrary::Gen (NonNegative Int)
463 dsk_c <- arbitrary::Gen (NonNegative Int)
464 dsk_s <- arbitrary::Gen (NonNegative Int)
465 cpu_c <- arbitrary::Gen (NonNegative Int)
466 nic_c <- arbitrary::Gen (NonNegative Int)
467 su <- arbitrary::Gen (NonNegative Int)
468 return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
469 , Types.iSpecCpuCount = fromIntegral cpu_c
470 , Types.iSpecDiskSize = fromIntegral dsk_s
471 , Types.iSpecDiskCount = fromIntegral dsk_c
472 , Types.iSpecNicCount = fromIntegral nic_c
473 , Types.iSpecSpindleUse = fromIntegral su
476 -- | Generates an ispec bigger than the given one.
477 genBiggerISpec :: Types.ISpec -> Gen Types.ISpec
478 genBiggerISpec imin = do
479 mem_s <- choose (Types.iSpecMemorySize imin, maxBound)
480 dsk_c <- choose (Types.iSpecDiskCount imin, maxBound)
481 dsk_s <- choose (Types.iSpecDiskSize imin, maxBound)
482 cpu_c <- choose (Types.iSpecCpuCount imin, maxBound)
483 nic_c <- choose (Types.iSpecNicCount imin, maxBound)
484 su <- choose (Types.iSpecSpindleUse imin, maxBound)
485 return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
486 , Types.iSpecCpuCount = fromIntegral cpu_c
487 , Types.iSpecDiskSize = fromIntegral dsk_s
488 , Types.iSpecDiskCount = fromIntegral dsk_c
489 , Types.iSpecNicCount = fromIntegral nic_c
490 , Types.iSpecSpindleUse = fromIntegral su
493 instance Arbitrary Types.IPolicy where
496 istd <- genBiggerISpec imin
497 imax <- genBiggerISpec istd
498 num_tmpl <- choose (0, length allDiskTemplates)
499 dts <- genUniquesList num_tmpl
500 vcpu_ratio <- choose (1.0, maxVcpuRatio)
501 spindle_ratio <- choose (1.0, maxSpindleRatio)
502 return Types.IPolicy { Types.iPolicyMinSpec = imin
503 , Types.iPolicyStdSpec = istd
504 , Types.iPolicyMaxSpec = imax
505 , Types.iPolicyDiskTemplates = dts
506 , Types.iPolicyVcpuRatio = vcpu_ratio
507 , Types.iPolicySpindleRatio = spindle_ratio
-- | Helper to generate a small string that doesn't contain commas.
genNonCommaString :: Gen String
genNonCommaString = do
  size <- choose (0, 20) -- arbitrary max size
  -- use an operator section instead of ((/=) ',') for idiomatic style
  vectorOf size (arbitrary `suchThat` (/= ','))
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
  forAll (choose (0, 20)) $ \len ->
  forAll (vectorOf len genNonCommaString `suchThat` (/= [""])) $ \strs ->
  Utils.sepSplit ',' (Utils.commaJoin strs) ==? strs
-- | Split followed by join must reconstruct the original string.
prop_Utils_commaSplitJoin s =
  Utils.commaJoin parts ==? s
  where parts = Utils.sepSplit ',' s
-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- type: a missing key yields the default, a present key yields its
-- stored value (never the default).
prop_Utils_fromObjWithDefault def_value random_key =
  JSON.fromObjWithDefault [] random_key def_value == Just def_value &&
  JSON.fromObjWithDefault assoc random_key (def_value + 1) == Just def_value
  where _types = def_value :: Integer
        assoc = [(random_key, J.showJSON def_value)]
-- | Test that functional if' behaves like the syntactic sugar if
-- (compares 'Utils.if'' against the built-in conditional on the same
-- inputs).
prop_Utils_if'if :: Bool -> Int -> Int -> Gen Prop
prop_Utils_if'if cnd a b =
  Utils.if' cnd a b ==? if cnd then a else b
-- | Test basic select functionality: the result is the first value
-- paired with True, or the default when no True pair exists.
prop_Utils_select :: Int      -- ^ Default result
                  -> [Int]    -- ^ List of False values
                  -> [Int]    -- ^ List of True values
                  -> Gen Prop -- ^ Test result
prop_Utils_select def lst1 lst2 =
  Utils.select def (falses ++ trues) ==? expected
  where expected = if null lst2 then def else head lst2
        falses = map ((,) False) lst1
        trues = map ((,) True) lst2
-- | Test basic select functionality with undefined default: since a
-- True pair always exists, the default must never be forced.
prop_Utils_select_undefd :: [Int]            -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop         -- ^ Test result
prop_Utils_select_undefd lst1 (NonEmpty lst2) =
  Utils.select undefined (falses ++ trues) ==? head lst2
  where falses = map ((,) False) lst1
        trues = map ((,) True) lst2
-- | Test basic select functionality with undefined list values: the
-- trailing undefined entry must never be reached.
prop_Utils_select_undefv :: [Int]            -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop         -- ^ Test result
prop_Utils_select_undefv lst1 (NonEmpty lst2) =
  Utils.select undefined candidates ==? head lst2
  where candidates =
          map ((,) False) lst1 ++ map ((,) True) lst2 ++ [undefined]
575 prop_Utils_parseUnit (NonNegative n) =
576 Utils.parseUnit (show n) ==? Types.Ok n .&&.
577 Utils.parseUnit (show n ++ "m") ==? Types.Ok n .&&.
578 Utils.parseUnit (show n ++ "M") ==? Types.Ok (truncate n_mb::Int) .&&.
579 Utils.parseUnit (show n ++ "g") ==? Types.Ok (n*1024) .&&.
580 Utils.parseUnit (show n ++ "G") ==? Types.Ok (truncate n_gb::Int) .&&.
581 Utils.parseUnit (show n ++ "t") ==? Types.Ok (n*1048576) .&&.
582 Utils.parseUnit (show n ++ "T") ==? Types.Ok (truncate n_tb::Int) .&&.
583 printTestCase "Internal error/overflow?"
584 (n_mb >=0 && n_gb >= 0 && n_tb >= 0) .&&.
585 property (Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int))
586 where _types = (n::Int)
587 n_mb = (fromIntegral n::Rational) * 1000 * 1000 / 1024 / 1024
591 -- | Test list for the Utils module.
593 [ 'prop_Utils_commaJoinSplit
594 , 'prop_Utils_commaSplitJoin
595 , 'prop_Utils_fromObjWithDefault
598 , 'prop_Utils_select_undefd
599 , 'prop_Utils_select_undefv
600 , 'prop_Utils_parseUnit
-- | Make sure add is idempotent: adding the same (key, elem) twice
-- must equal adding it once.
prop_PeerMap_addIdempotent pmap key em =
  once ==? PeerMap.add key em once
  where _types = (pmap::PeerMap.PeerMap,
                  key::PeerMap.Key, em::PeerMap.Elem)
        puniq = PeerMap.accumArray const pmap
        once = PeerMap.add key em puniq
-- | Make sure remove is idempotent: removing a key twice must equal
-- removing it once.
prop_PeerMap_removeIdempotent pmap key =
  removed ==? PeerMap.remove key removed
  where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
        puniq = PeerMap.accumArray const pmap
        removed = PeerMap.remove key puniq
-- | Make sure looking up a missing key returns 0.
prop_PeerMap_findMissing pmap key =
  PeerMap.find key cleaned ==? 0
  where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
        cleaned = PeerMap.remove key (PeerMap.accumArray const pmap)
-- | Make sure an added item can be found again.
prop_PeerMap_addFind pmap key em =
  PeerMap.find key updated ==? em
  where _types = (pmap::PeerMap.PeerMap,
                  key::PeerMap.Key, em::PeerMap.Elem)
        updated = PeerMap.add key em (PeerMap.accumArray const pmap)
-- | Manual check that maxElem returns the maximum indeed, or 0 for an
-- empty map.
prop_PeerMap_maxElem pmap =
  PeerMap.maxElem puniq ==? expected
  where _types = pmap::PeerMap.PeerMap
        puniq = PeerMap.accumArray const pmap
        expected = if null puniq
                     then 0
                     else maximum (map snd puniq)
640 -- | List of tests for the PeerMap module.
642 [ 'prop_PeerMap_addIdempotent
643 , 'prop_PeerMap_removeIdempotent
644 , 'prop_PeerMap_maxElem
645 , 'prop_PeerMap_addFind
646 , 'prop_PeerMap_findMissing
649 -- ** Container tests
651 -- we silence the following due to hlint bug fixed in later versions
652 {-# ANN prop_Container_addTwo "HLint: ignore Avoid lambda" #-}
653 prop_Container_addTwo cdata i1 i2 =
654 fn i1 i2 cont == fn i2 i1 cont &&
655 fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
656 where _types = (cdata::[Int],
658 cont = foldl (\c x -> Container.add x x c) Container.empty cdata
659 fn x1 x2 = Container.addTwo x1 x1 x2 x2
-- | nameOf on a single-node cluster must return that node's name.
prop_Container_nameOf node =
  Container.nameOf nl (Node.idx fnode) ==? Node.name fnode
  where nl = makeSmallCluster node 1
        fnode = head (Container.elems nl)
666 -- | We test that in a cluster, given a random node, we can find it by
667 -- its name and alias, as long as all names and aliases are unique,
668 -- and that we fail to find a non-existing name.
669 prop_Container_findByName =
670 forAll (genNode (Just 1) Nothing) $ \node ->
671 forAll (choose (1, 20)) $ \ cnt ->
672 forAll (choose (0, cnt - 1)) $ \ fidx ->
673 forAll (genUniquesList (cnt * 2)) $ \ allnames ->
674 forAll (arbitrary `suchThat` (`notElem` allnames)) $ \ othername ->
675 let names = zip (take cnt allnames) (drop cnt allnames)
676 nl = makeSmallCluster node cnt
677 nodes = Container.elems nl
678 nodes' = map (\((name, alias), nn) -> (Node.idx nn,
679 nn { Node.name = name,
680 Node.alias = alias }))
682 nl' = Container.fromList nodes'
683 target = snd (nodes' !! fidx)
684 in Container.findByName nl' (Node.name target) ==? Just target .&&.
685 Container.findByName nl' (Node.alias target) ==? Just target .&&.
686 printTestCase "Found non-existing name"
687 (isNothing (Container.findByName nl' othername))
689 testSuite "Container"
690 [ 'prop_Container_addTwo
691 , 'prop_Container_nameOf
692 , 'prop_Container_findByName
697 -- Simple instance tests, we only have setter/getters
-- | A generated instance's name and alias must coincide (the
-- generators build instances via 'Instance.create' with one name).
prop_Instance_creat inst =
  Instance.name inst ==? Instance.alias inst
-- | setIdx: the new index must round-trip through 'Instance.idx'.
prop_Instance_setIdx inst idx =
  Instance.idx (Instance.setIdx inst idx) ==? idx
  where _types = (inst::Instance.Instance, idx::Types.Idx)
-- | setName: both the name and the alias must be updated to the new
-- name.
prop_Instance_setName inst name =
  (Instance.name newinst, Instance.alias newinst) == (name, name)
  where _types = (inst::Instance.Instance, name::String)
        newinst = Instance.setName inst name
-- | setAlias: only the alias changes, the name must stay the same.
prop_Instance_setAlias inst name =
  (Instance.name newinst, Instance.alias newinst) ==
  (Instance.name inst, name)
  where _types = (inst::Instance.Instance, name::String)
        newinst = Instance.setAlias inst name
-- | setPri: the new primary node index must round-trip through
-- 'Instance.pNode'.
prop_Instance_setPri inst pdx =
  Instance.pNode (Instance.setPri inst pdx) ==? pdx
  where _types = (inst::Instance.Instance, pdx::Types.Ndx)
-- | setSec: the new secondary node index must round-trip through
-- 'Instance.sNode'.
prop_Instance_setSec inst sdx =
  Instance.sNode (Instance.setSec inst sdx) ==? sdx
  where _types = (inst::Instance.Instance, sdx::Types.Ndx)
-- | setBoth: both node indices must be updated in one call.
prop_Instance_setBoth inst pdx sdx =
  (Instance.pNode si, Instance.sNode si) == (pdx, sdx)
  where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
        si = Instance.setBoth inst pdx sdx
731 prop_Instance_shrinkMG inst =
732 Instance.mem inst >= 2 * Types.unitMem ==>
733 case Instance.shrinkByType inst Types.FailMem of
734 Types.Ok inst' -> Instance.mem inst' == Instance.mem inst - Types.unitMem
-- | Shrinking memory on an instance with less than two memory units
-- must fail.
prop_Instance_shrinkMF inst =
  forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    Types.isBad
      (Instance.shrinkByType (inst { Instance.mem = mem }) Types.FailMem)
742 prop_Instance_shrinkCG inst =
743 Instance.vcpus inst >= 2 * Types.unitCpu ==>
744 case Instance.shrinkByType inst Types.FailCPU of
746 Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
-- | Shrinking cpus on an instance with less than two cpu units must
-- fail.
prop_Instance_shrinkCF inst =
  forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    Types.isBad
      (Instance.shrinkByType (inst { Instance.vcpus = vcpus }) Types.FailCPU)
754 prop_Instance_shrinkDG inst =
755 Instance.dsk inst >= 2 * Types.unitDsk ==>
756 case Instance.shrinkByType inst Types.FailDisk of
758 Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
-- | Shrinking disk on an instance with less than two disk units must
-- fail.
prop_Instance_shrinkDF inst =
  forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    Types.isBad
      (Instance.shrinkByType (inst { Instance.dsk = dsk }) Types.FailDisk)
-- | setMovable: the movable flag must round-trip through the setter.
prop_Instance_setMovable inst m =
  Instance.movable inst' ==? m
  where inst' = Instance.setMovable inst m
771 [ 'prop_Instance_creat
772 , 'prop_Instance_setIdx
773 , 'prop_Instance_setName
774 , 'prop_Instance_setAlias
775 , 'prop_Instance_setPri
776 , 'prop_Instance_setSec
777 , 'prop_Instance_setBoth
778 , 'prop_Instance_shrinkMG
779 , 'prop_Instance_shrinkMF
780 , 'prop_Instance_shrinkCG
781 , 'prop_Instance_shrinkCF
782 , 'prop_Instance_shrinkDG
783 , 'prop_Instance_shrinkDF
784 , 'prop_Instance_setMovable
789 -- *** Text backend tests
791 -- Instance text loader tests
793 prop_Text_Load_Instance name mem dsk vcpus status
794 (NonEmpty pnode) snode
795 (NonNegative pdx) (NonNegative sdx) autobal dt su =
796 pnode /= snode && pdx /= sdx ==>
797 let vcpus_s = show vcpus
801 status_s = Types.instanceStatusToRaw status
804 else [(pnode, pdx), (snode, sdx)]
805 nl = Data.Map.fromList ndx
807 sbal = if autobal then "Y" else "N"
808 sdt = Types.diskTemplateToRaw dt
809 inst = Text.loadInst nl
810 [name, mem_s, dsk_s, vcpus_s, status_s,
811 sbal, pnode, snode, sdt, tags, su_s]
812 fail1 = Text.loadInst nl
813 [name, mem_s, dsk_s, vcpus_s, status_s,
814 sbal, pnode, pnode, tags]
815 _types = ( name::String, mem::Int, dsk::Int
816 , vcpus::Int, status::Types.InstanceStatus
820 Types.Bad msg -> failTest $ "Failed to load instance: " ++ msg
821 Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
822 \ loading the instance" $
823 Instance.name i == name &&
824 Instance.vcpus i == vcpus &&
825 Instance.mem i == mem &&
826 Instance.pNode i == pdx &&
827 Instance.sNode i == (if null snode
828 then Node.noSecondary
830 Instance.autoBalance i == autobal &&
831 Instance.spindleUse i == su &&
-- | Loading an instance from a field list of the wrong length must
-- fail with the expected error message.
prop_Text_Load_InstanceFail ktn fields =
  length fields `notElem` [10, 11] ==>
  case Text.loadInst nl fields of
    Types.Ok _ -> failTest "Managed to load instance from invalid data"
    Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                     "Invalid/incomplete instance data: '" `isPrefixOf` msg
  where nl = Data.Map.fromList ktn
842 prop_Text_Load_Node name tm nm fm td fd tc fo =
843 let conv v = if v < 0
855 any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
856 gid = Group.uuid defGroup
857 in case Text.loadNode defGroupAssoc
858 [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
860 Just (name', node) ->
862 then Node.offline node
863 else Node.name node == name' && name' == name &&
864 Node.alias node == name &&
865 Node.tMem node == fromIntegral tm &&
866 Node.nMem node == nm &&
867 Node.fMem node == fm &&
868 Node.tDsk node == fromIntegral td &&
869 Node.fDsk node == fd &&
870 Node.tCpu node == fromIntegral tc
-- | Loading a node from a field list of the wrong length must fail.
prop_Text_Load_NodeFail fields =
  length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
875 prop_Text_NodeLSIdempotent =
876 forAll (genNode (Just 1) Nothing) $ \node ->
877 -- override failN1 to what loadNode returns by default
878 let n = Node.setPolicy Types.defIPolicy $
879 node { Node.failN1 = True, Node.offline = False }
881 (Text.loadNode defGroupAssoc.
882 Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==?
883 Just (Node.name n, n)
-- | Serialising and re-loading an ispec must be the identity.
prop_Text_ISpecIdempotent ispec =
  case reload ispec of
    Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
    Types.Ok ispec' -> ispec ==? ispec'
  where reload =
          Text.loadISpec "dummy" . Utils.sepSplit ',' . Text.serializeISpec
-- | Serialising and re-loading an instance policy must be the
-- identity (including the owner).
prop_Text_IPolicyIdempotent ipol =
  case Text.loadIPolicy (Utils.sepSplit '|' serialized) of
    Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
    Types.Ok res -> (owner, ipol) ==? res
  where owner = "dummy"
        serialized = Text.serializeIPolicy owner ipol
-- | This property, while being in the text tests, does more than just
-- test end-to-end the serialisation and loading back workflow; it
-- also tests the Loader.mergeData and the actual
-- Cluster.iterateAlloc (for well-behaving w.r.t. instance
-- allocations, not for the business logic). As such, it's a quite
-- complex and slow test, and that's the reason we restrict it to
-- small cluster sizes.
905 prop_Text_CreateSerialise =
906 forAll genTags $ \ctags ->
907 forAll (choose (1, 20)) $ \maxiter ->
908 forAll (choose (2, 10)) $ \count ->
909 forAll genOnlineNode $ \node ->
910 forAll (genInstanceSmallerThanNode node) $ \inst ->
911 let nl = makeSmallCluster node count
912 reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
913 in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
914 Cluster.iterateAlloc nl Container.empty (Just maxiter) inst allocn [] []
916 Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
917 Types.Ok (_, _, _, [], _) -> printTestCase
918 "Failed to allocate: no allocations" False
919 Types.Ok (_, nl', il', _, _) ->
920 let cdata = Loader.ClusterData defGroupList nl' il' ctags
922 saved = Text.serializeCluster cdata
923 in case Text.parseData saved >>= Loader.mergeData [] [] [] [] of
924 Types.Bad msg -> failTest $ "Failed to load/merge: " ++ msg
925 Types.Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
926 ctags ==? ctags2 .&&.
927 Types.defIPolicy ==? cpol2 .&&.
929 defGroupList ==? gl2 .&&.
933 [ 'prop_Text_Load_Instance
934 , 'prop_Text_Load_InstanceFail
935 , 'prop_Text_Load_Node
936 , 'prop_Text_Load_NodeFail
937 , 'prop_Text_NodeLSIdempotent
938 , 'prop_Text_ISpecIdempotent
939 , 'prop_Text_IPolicyIdempotent
940 , 'prop_Text_CreateSerialise
945 -- | Generates a tuple of specs for simulation.
946 genSimuSpec :: Gen (String, Int, Int, Int, Int)
948 pol <- elements [C.allocPolicyPreferred,
949 C.allocPolicyLastResort, C.allocPolicyUnallocable,
951 -- should be reasonable (nodes/group), bigger values only complicate
952 -- the display of failed tests, and we don't care (in this particular
953 -- test) about big node groups
954 nodes <- choose (0, 20)
955 dsk <- choose (0, maxDsk)
956 mem <- choose (0, maxMem)
957 cpu <- choose (0, maxCpu)
958 return (pol, nodes, dsk, mem, cpu)
960 -- | Checks that given a set of correct specs, we can load them
961 -- successfully, and that at high-level the values look right.
-- NOTE(review): original line 962 (the property's head, presumably
-- "prop_Simu_Load =" or similar) is missing from this extract, as are
-- lines 970 (a tuple component of mdc_in, likely the cpu field) and
-- 979 (likely the start of the conjunction of checks).
963 forAll (choose (0, 10)) $ \ngroups ->
964 forAll (replicateM ngroups genSimuSpec) $ \specs ->
-- Render each spec tuple back into the "pol,n,d,m,c" text format the
-- simulation backend parses.
965 let strspecs = map (\(p, n, d, m, c) -> printf "%s,%d,%d,%d,%d"
966 p n d m c::String) specs
967 totnodes = sum $ map (\(_, n, _, _, _) -> n) specs
968 mdc_in = concatMap (\(_, n, d, m, c) ->
969 replicate n (fromIntegral m, fromIntegral d,
971 fromIntegral m, fromIntegral d)) specs
972 in case Simu.parseData strspecs of
973 Types.Bad msg -> failTest $ "Failed to load specs: " ++ msg
974 Types.Ok (Loader.ClusterData gl nl il tags ipol) ->
975 let nodes = map snd $ IntMap.toAscList nl
976 nidx = map Node.idx nodes
977 mdc_out = map (\n -> (Node.tMem n, Node.tDsk n, Node.tCpu n,
978 Node.fMem n, Node.fDsk n)) nodes
-- High-level sanity: group/node/instance counts, default policy, node
-- index numbering and the per-node mem/disk/cpu round-trip.
980 Container.size gl ==? ngroups .&&.
981 Container.size nl ==? totnodes .&&.
982 Container.size il ==? 0 .&&.
983 length tags ==? 0 .&&.
984 ipol ==? Types.defIPolicy .&&.
985 nidx ==? [1..totnodes] .&&.
986 mdc_in ==? mdc_out .&&.
987 map Group.iPolicy (Container.elems gl) ==?
988 replicate ngroups Types.defIPolicy
-- | Setting an alias must keep the node's name intact and store
-- exactly the requested alias.
prop_Node_setAlias :: Node.Node -> String -> Bool
prop_Node_setAlias node name =
  let aliased = Node.setAlias node name
  in Node.name aliased == Node.name node &&
     Node.alias aliased == name
-- | 'Node.setOffline' must record exactly the requested status.
prop_Node_setOffline :: Node.Node -> Bool -> Property
prop_Node_setOffline node status =
  Node.offline (Node.setOffline node status) ==? status
-- | 'Node.setXmem' must store exactly the requested external memory.
prop_Node_setXmem :: Node.Node -> Int -> Property
prop_Node_setXmem node xm =
  Node.xMem (Node.setXmem node xm) ==? xm
-- | 'Node.setMcpu' must propagate the new vcpu ratio into the node's
-- instance policy.
prop_Node_setMcpu node mc =
  let updated = Node.setMcpu node mc
  in Types.iPolicyVcpuRatio (Node.iPolicy updated) ==? mc
1014 -- | Check that an instance add with too high memory or disk will be
-- (continuation of this comment, original line 1015, is missing from
-- this extract; the case below checks for a FailMem result.)
1016 prop_Node_addPriFM node inst =
1017 Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
1018 not (Instance.isOffline inst) ==>
1019 case Node.addPri node inst'' of
1020 Types.OpFail Types.FailMem -> True
-- NOTE(review): original line 1021 (the fall-through case alternative)
-- is missing from this extract.
1022 where _types = (node::Node.Node, inst::Instance.Instance)
-- inst'' fits the node except for memory, which is restored to the
-- original (too large) value so the add must fail on memory.
1023 inst' = setInstanceSmallerThanNode node inst
1024 inst'' = inst' { Instance.mem = Instance.mem inst }
1026 -- | Check that adding a primary instance with too much disk fails
1027 -- with type FailDisk.
1028 prop_Node_addPriFD node inst =
-- Only local-storage templates are relevant: disk is consumed on the
-- primary node itself.
1029 forAll (elements Instance.localStorageTemplates) $ \dt ->
1030 Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
-- inst'' fits the node except for disk, which keeps the original (too
-- large) value, so the add must fail on disk.
1031 let inst' = setInstanceSmallerThanNode node inst
1032 inst'' = inst' { Instance.dsk = Instance.dsk inst
1033 , Instance.diskTemplate = dt }
1034 in case Node.addPri node inst'' of
1035 Types.OpFail Types.FailDisk -> True
-- NOTE(review): original lines 1036-1037 (the fall-through case
-- alternative) are missing from this extract.
-- | Adding a primary instance that requests more VCPUs than the node
-- has available must fail with 'Types.FailCPU'.
prop_Node_addPriFC =
  forAll (choose (1, maxCpu)) $ \surplus ->
  forAll genOnlineNode $ \node ->
  forAll (arbitrary `suchThat` Instance.notOffline) $ \inst ->
  let shrunk = setInstanceSmallerThanNode node inst
      -- fits the node in every dimension except vcpus
      greedy = shrunk { Instance.vcpus = Node.availCpu node + surplus }
  in case Node.addPri node greedy of
       Types.OpFail Types.FailCPU -> property True
       other -> failTest $ "Expected OpFail FailCPU, but got " ++ show other
-- | An instance with too much memory (when not offline) or too much
-- disk must be rejected as secondary on a node that is not already
-- failing N+1.
prop_Node_addSec :: Node.Node -> Instance.Instance -> Int -> Property
prop_Node_addSec node inst pdx =
  let memTooBig = Instance.mem inst >= (Node.fMem node - Node.rMem node) &&
                  not (Instance.isOffline inst)
      dskTooBig = Instance.dsk inst >= Node.fDsk node
  in (memTooBig || dskTooBig) && not (Node.failN1 node) ==>
     isFailure (Node.addSec node inst pdx)
-- | An admin-offline instance with a fitting disk size may exceed the
-- node's available memory and cpu and must still be accepted as a
-- primary.
prop_Node_addOfflinePri (NonNegative extra_mem) (NonNegative extra_cpu) =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  let offInst = inst { Instance.runSt = Types.AdminOffline
                     , Instance.mem = Node.availMem node + extra_mem
                     , Instance.vcpus = Node.availCpu node + extra_cpu }
  in case Node.addPri node offInst of
       Types.OpGood _ -> property True
       v -> failTest $ "Expected OpGood, but got: " ++ show v
-- | An admin-offline DRBD instance with a fitting disk size may exceed
-- the node's available memory and cpu and must still be accepted as a
-- secondary.
prop_Node_addOfflineSec (NonNegative extra_mem) (NonNegative extra_cpu) pdx =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  let offInst = inst { Instance.runSt = Types.AdminOffline
                     , Instance.mem = Node.availMem node + extra_mem
                     , Instance.vcpus = Node.availCpu node + extra_cpu
                     , Instance.diskTemplate = Types.DTDrbd8 }
  in case Node.addSec node offInst pdx of
       Types.OpGood _ -> property True
       v -> failTest $ "Expected OpGood/OpGood, but got: " ++ show v
1085 -- | Checks for memory reservation changes.
-- An auto-balanced secondary must raise the node's reserved memory by
-- exactly the instance's memory; a non-auto-balanced one must not
-- change it; removal must restore the original value in both cases.
1086 prop_Node_rMem inst =
1087 not (Instance.isOffline inst) ==>
1088 forAll (genOnlineNode `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
1089 -- ab = auto_balance, nb = non-auto_balance
1090 -- we use -1 as the primary node of the instance
1091 let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True
1092 , Instance.diskTemplate = Types.DTDrbd8 }
1093 inst_ab = setInstanceSmallerThanNode node inst'
1094 inst_nb = inst_ab { Instance.autoBalance = False }
1095 -- now we have the two instances, identical except the
1096 -- autoBalance attribute
1097 orig_rmem = Node.rMem node
1098 inst_idx = Instance.idx inst_ab
1099 node_add_ab = Node.addSec node inst_ab (-1)
1100 node_add_nb = Node.addSec node inst_nb (-1)
1101 node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
1102 node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
1103 in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
1104 (Types.OpGood a_ab, Types.OpGood a_nb,
1105 Types.OpGood d_ab, Types.OpGood d_nb) ->
1106 printTestCase "Consistency checks failed" $
1107 Node.rMem a_ab > orig_rmem &&
1108 Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
1109 Node.rMem a_nb == orig_rmem &&
1110 Node.rMem d_ab == orig_rmem &&
1111 Node.rMem d_nb == orig_rmem &&
1112 -- this is not related to rMem, but as good a place to
-- NOTE(review): original line 1113 (continuation of the comment
-- above) is missing from this extract.
1114 inst_idx `elem` Node.sList a_ab &&
1115 inst_idx `notElem` Node.sList d_ab
1116 x -> failTest $ "Failed to add/remove instances: " ++ show x
1118 -- | Check mdsk setting.
-- After setting the minimum-disk ratio, the low-disk watermark and
-- available-disk figures must stay within the node's totals.
1119 prop_Node_setMdsk node mx =
1120 Node.loDsk node' >= 0 &&
1121 fromIntegral (Node.loDsk node') <= Node.tDsk node &&
1122 Node.availDisk node' >= 0 &&
1123 Node.availDisk node' <= Node.fDsk node' &&
1124 fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
1125 Node.mDsk node' == mx'
1126 where _types = (node::Node.Node, mx::SmallRatio)
1127 node' = Node.setMdsk node mx'
-- NOTE(review): original lines 1128-1130 are missing from this
-- extract; they presumably include the binding of mx' (unwrapping the
-- SmallRatio) -- confirm against the full file.
-- | Adding a tag list to an empty map and then deleting the same list
-- must yield the empty map again.
prop_Node_tagMaps_idempotent =
  forAll genTags $ \tags ->
  let withTags = Node.addTags Data.Map.empty tags
  in Node.delTags withTags tags ==? Data.Map.empty
-- | Once tags are present in a map, re-adding any single one of them
-- must be rejected.
prop_Node_tagMaps_reject =
  forAll (genTags `suchThat` (not . null)) $ \tags ->
  let tagmap = Node.addTags Data.Map.empty tags
  in all (\tag -> Node.rejectAddTags tagmap [tag]) tags
-- | Every default node field must have a known header and render to a
-- known value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \field ->
  let header = fst (Node.showHeader field)
      rendered = Node.showField node field
  in header /= Types.unknownField && rendered /= Types.unknownField
-- | 'Node.computeGroups' must partition the input without losing any
-- node, keep each group homogeneous, and never duplicate a group UUID.
prop_Node_computeGroups nodes =
  let grouped = Node.computeGroups nodes
      uuids = map fst grouped
      homogeneous (guuid, ns) = all ((== guuid) . Node.group) ns
  in length nodes == sum (map (length . snd) grouped) &&
     all homogeneous grouped &&
     length (nub uuids) == length uuids &&
     (null nodes || not (null grouped))
-- | Adding and then removing a primary instance must restore the
-- original node.
prop_Node_addPri_idempotent =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  case Node.addPri node inst of
    Types.OpGood added -> Node.removePri added inst ==? node
    Types.OpFail _ -> failTest "Can't add instance"
-- | Adding and then removing a secondary (DRBD) instance must restore
-- the original node.
prop_Node_addSec_idempotent =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  let pdx = Node.idx node + 1
      drbdInst = (Instance.setPri inst pdx)
                 { Instance.diskTemplate = Types.DTDrbd8 }
  in case Node.addSec node drbdInst pdx of
       Types.OpGood added -> Node.removeSec added drbdInst ==? node
       Types.OpFail _ -> failTest "Can't add instance"
1173 [ 'prop_Node_setAlias
1174 , 'prop_Node_setOffline
1175 , 'prop_Node_setMcpu
1176 , 'prop_Node_setXmem
1177 , 'prop_Node_addPriFM
1178 , 'prop_Node_addPriFD
1179 , 'prop_Node_addPriFC
1181 , 'prop_Node_addOfflinePri
1182 , 'prop_Node_addOfflineSec
1184 , 'prop_Node_setMdsk
1185 , 'prop_Node_tagMaps_idempotent
1186 , 'prop_Node_tagMaps_reject
1187 , 'prop_Node_showField
1188 , 'prop_Node_computeGroups
1189 , 'prop_Node_addPri_idempotent
1190 , 'prop_Node_addSec_idempotent
1195 -- | Check that the cluster score is close to zero for a homogeneous
-- cluster (continuation of this comment, original line 1196, is
-- missing from this extract).
1197 prop_Score_Zero node =
1198 forAll (choose (1, 1024)) $ \count ->
1199 (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
1200 (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
-- Build a cluster of 'count' identical copies of the node and score it.
1201 let fn = Node.buildPeers node Container.empty
1202 nlst = replicate count fn
1203 score = Cluster.compCVNodes nlst
1204 -- we can't say == 0 here as the floating point errors accumulate;
1205 -- this should be much lower than the default score in CLI.hs
-- NOTE(review): original lines 1206-1207 (the "in score <= ..."
-- conclusion of the property) are missing from this extract.
1208 -- | Check that cluster stats are sane.
-- NOTE(review): the property's head (original line 1209, presumably
-- "prop_CStats_sane =") is missing from this extract.
1210 forAll (choose (1, 1024)) $ \count ->
1211 forAll genOnlineNode $ \node ->
-- Replicate one online node 'count' times and check the aggregate
-- available-disk figure is non-negative and bounded by free disk.
1212 let fn = Node.buildPeers node Container.empty
1213 nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
1214 nl = Container.fromList nlst
1215 cstats = Cluster.totalResources nl
1216 in Cluster.csAdsk cstats >= 0 &&
1217 Cluster.csAdsk cstats <= Cluster.csFdsk cstats
1219 -- | Check that one instance is allocated correctly, without
1220 -- rebalances needed.
1221 prop_ClusterAlloc_sane inst =
1222 forAll (choose (5, 20)) $ \count ->
1223 forAll genOnlineNode $ \node ->
1224 let (nl, il, inst') = makeSmallEmptyCluster node count inst
1225 reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
1226 in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
1227 Cluster.tryAlloc nl il inst' of
1228 Types.Bad _ -> False
-- NOTE(review): original lines 1229 and 1231 (the "Types.Ok as ->"
-- alternative head and a "Nothing -> ..." branch of the inner case)
-- are missing from this extract, as is line 1236.
1230 case Cluster.asSolution as of
-- After a successful allocation, re-running the balancer must find
-- nothing to improve.
1232 Just (xnl, xi, _, cv) ->
1233 let il' = Container.add (Instance.idx xi) xi il
1234 tbl = Cluster.Table xnl il' cv []
1235 in not (canBalance tbl True True False)
1237 -- | Checks that on a 2-5 node cluster, we can allocate a random
1238 -- instance spec via tiered allocation (whatever the original instance
1239 -- spec), on either one or two nodes. Furthermore, we test that
1240 -- computed allocation statistics are correct.
1241 prop_ClusterCanTieredAlloc inst =
1242 forAll (choose (2, 5)) $ \count ->
1243 forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
1244 let nl = makeSmallCluster node count
1245 il = Container.empty
1246 rqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
1247 allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
1248 in case allocnodes >>= \allocnodes' ->
1249 Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
1250 Types.Bad msg -> failTest $ "Failed to tiered alloc: " ++ msg
1251 Types.Ok (_, nl', il', ixes, cstats) ->
-- Split the post-allocation resources into allocated/pool/unavailable
-- and cross-check them against per-node totals.
1252 let (ai_alloc, ai_pool, ai_unav) =
1253 Cluster.computeAllocationDelta
1254 (Cluster.totalResources nl)
1255 (Cluster.totalResources nl')
1256 all_nodes = Container.elems nl
-- Checks: at least one instance was placed, index/stat lists agree,
-- and the three-way cpu/mem/disk split sums to the cluster totals.
1257 in property (not (null ixes)) .&&.
1258 IntMap.size il' ==? length ixes .&&.
1259 length ixes ==? length cstats .&&.
1260 sum (map Types.allocInfoVCpus [ai_alloc, ai_pool, ai_unav]) ==?
1261 sum (map Node.hiCpu all_nodes) .&&.
1262 sum (map Types.allocInfoNCpus [ai_alloc, ai_pool, ai_unav]) ==?
1263 sum (map Node.tCpu all_nodes) .&&.
1264 sum (map Types.allocInfoMem [ai_alloc, ai_pool, ai_unav]) ==?
1265 truncate (sum (map Node.tMem all_nodes)) .&&.
1266 sum (map Types.allocInfoDisk [ai_alloc, ai_pool, ai_unav]) ==?
1267 truncate (sum (map Node.tDsk all_nodes))
1269 -- | Helper function to create a cluster with the given range of nodes
1270 -- and allocate an instance on it.
-- Returns the node list, a one-element instance list and the placed
-- instance, or a Bad value if allocation was impossible.
1271 genClusterAlloc count node inst =
1272 let nl = makeSmallCluster node count
1273 reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
1274 in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
1275 Cluster.tryAlloc nl Container.empty inst of
1276 Types.Bad _ -> Types.Bad "Can't allocate"
-- NOTE(review): original line 1277 (the "Types.Ok as ->" alternative
-- head) is missing from this extract.
1278 case Cluster.asSolution as of
1279 Nothing -> Types.Bad "Empty solution?"
1280 Just (xnl, xi, _, _) ->
1281 let xil = Container.add (Instance.idx xi) xi Container.empty
1282 in Types.Ok (xnl, xil, xi)
1284 -- | Checks that on a 4-8 node cluster, once we allocate an instance,
1285 -- we can also relocate it.
1286 prop_ClusterAllocRelocate =
1287 forAll (choose (4, 8)) $ \count ->
1288 forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
1289 forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
1290 case genClusterAlloc count node inst of
1291 Types.Bad msg -> failTest msg
1292 Types.Ok (nl, il, inst') ->
-- Relocate away from one of the instance's current nodes; the excluded
-- node depends on the disk template (secondary for DRBD).
1293 case IAlloc.processRelocate defGroupList nl il
1294 (Instance.idx inst) 1
1295 [(if Instance.diskTemplate inst' == Types.DTDrbd8
-- NOTE(review): original line 1296 (the "then ..." branch, presumably
-- "then Instance.sNode") is missing from this extract.
1297 else Instance.pNode) inst'] of
1298 Types.Ok _ -> property True
1299 Types.Bad msg -> failTest $ "Failed to relocate: " ++ msg
1301 -- | Helper property checker for the result of a nodeEvac or
1302 -- changeGroup operation.
-- Verifies that nothing failed, opcodes were produced, and exactly the
-- expected instance moved into the expected group.
1303 check_EvacMode grp inst result =
-- NOTE(review): original lines 1304, 1312 and 1314 (the "case result
-- of" / "case moved of" scrutinee lines and a connective) are missing
-- from this extract.
1305 Types.Bad msg -> failTest $ "Couldn't evacuate/change group:" ++ msg
1306 Types.Ok (_, _, es) ->
1307 let moved = Cluster.esMoved es
1308 failed = Cluster.esFailed es
1309 opcodes = not . null $ Cluster.esOpCodes es
1310 in failmsg ("'failed' not empty: " ++ show failed) (null failed) .&&.
1311 failmsg "'opcodes' is null" opcodes .&&.
1313 [(idx', gdx, _)] -> failmsg "invalid instance moved" (idx == idx')
1315 failmsg "wrong target group"
1316 (gdx == Group.idx grp)
1317 v -> failmsg ("invalid solution: " ++ show v) False
1318 where failmsg = \msg -> printTestCase ("Failed to evacuate: " ++ msg)
1319 idx = Instance.idx inst
1321 -- | Checks that on a 4-8 node cluster, once we allocate an instance,
1322 -- we can also node-evacuate it.
1323 prop_ClusterAllocEvacuate =
1324 forAll (choose (4, 8)) $ \count ->
1325 forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
1326 forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
1327 case genClusterAlloc count node inst of
1328 Types.Bad msg -> failTest msg
1329 Types.Ok (nl, il, inst') ->
-- Every applicable evacuation mode for this instance's mirror type
-- must succeed; results are checked via check_EvacMode.
1330 conjoin $ map (\mode -> check_EvacMode defGroup inst' $
1331 Cluster.tryNodeEvac defGroupList nl il mode
1332 [Instance.idx inst']) .
-- NOTE(review): original line 1333 (the function computing the list of
-- modes from the mirror type) is missing from this extract.
1334 Instance.mirrorType $ inst'
1336 -- | Checks that on a 4-8 node cluster with two node groups, once we
1337 -- allocate an instance on the first node group, we can also change
-- (continuation of this comment, original line 1338, is missing from
-- this extract; presumably "-- its group".)
1339 prop_ClusterAllocChangeGroup =
1340 forAll (choose (4, 8)) $ \count ->
1341 forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
1342 forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
1343 case genClusterAlloc count node inst of
1344 Types.Bad msg -> failTest msg
1345 Types.Ok (nl, il, inst') ->
1346 -- we need to add a second node group and nodes to the cluster
-- The second group is a clone of the default group with a shifted
-- index; its nodes get indices offset past the existing maximum so the
-- two node sets do not collide in the container.
1347 let nl2 = Container.elems $ makeSmallCluster node count
1348 grp2 = Group.setIdx defGroup (Group.idx defGroup + 1)
1349 maxndx = maximum . map Node.idx $ nl2
1350 nl3 = map (\n -> n { Node.group = Group.idx grp2
1351 , Node.idx = Node.idx n + maxndx }) nl2
1352 nl4 = Container.fromList . map (\n -> (Node.idx n, n)) $ nl3
1353 gl' = Container.add (Group.idx grp2) grp2 defGroupList
1354 nl' = IntMap.union nl nl4
1355 in check_EvacMode grp2 inst' $
1356 Cluster.tryChangeGroup gl' nl' il [] [Instance.idx inst']
1358 -- | Check that allocating multiple instances on a cluster, then
1359 -- adding an empty node, results in a valid rebalance.
1360 prop_ClusterAllocBalance =
1361 forAll (genNode (Just 5) (Just 128)) $ \node ->
1362 forAll (choose (3, 5)) $ \count ->
1363 not (Node.offline node) && not (Node.failN1 node) ==>
-- One node is held back (deleteFindMax) while instances are packed
-- onto the rest; re-adding it afterwards must create an imbalance the
-- balancer can act on.
1364 let nl = makeSmallCluster node count
1365 (hnode, nl') = IntMap.deleteFindMax nl
1366 il = Container.empty
1367 allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
1368 i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
1369 in case allocnodes >>= \allocnodes' ->
1370 Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
1371 Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
1372 Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no instances"
1373 Types.Ok (_, xnl, il', _, _) ->
1374 let ynl = Container.add (Node.idx hnode) hnode xnl
1375 cv = Cluster.compCV ynl
1376 tbl = Cluster.Table ynl il' cv []
1377 in printTestCase "Failed to rebalance" $
1378 canBalance tbl True True False
-- | Checks consistency: only instances whose two nodes live in
-- different node groups must be reported as split.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [na, nb, nc] = Container.elems nl
      nc' = nc { Node.group = 1 }
      nl' = Container.add (Node.idx nc') nc' nl
      sameGrp  = Instance.setBoth inst (Node.idx na) (Node.idx nb)
      noSec    = Instance.setBoth inst (Node.idx na) Node.noSecondary
      splitGrp = Instance.setBoth inst (Node.idx na) (Node.idx nc)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, sameGrp)]) &&
     null (ccheck [(0, noSec)]) &&
     (not . null $ ccheck [(0, splitGrp)])
-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let addOne (ns, is) _ = assignInstance ns is inst 0 1
      base = makeSmallCluster node 2
      (nl', il') = foldl addOne (base, Container.empty) [1..icnt]
      parts = Cluster.splitCluster nl' il'
      homogeneous (guuid, (nlpart, _)) =
        all ((== guuid) . Node.group) (Container.elems nlpart)
  in sum (map (Container.size . snd . snd) parts) == icnt &&
     all homogeneous parts
1405 -- | Helper function to check if we can allocate an instance on a
-- cluster (continuation of this comment, original line 1406, is
-- missing from this extract).
1407 canAllocOn :: Node.List -> Int -> Instance.Instance -> Bool
1408 canAllocOn nl reqnodes inst =
1409 case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
1410 Cluster.tryAlloc nl (Container.empty) inst of
1411 Types.Bad _ -> False
-- NOTE(review): original lines 1412 and 1414-1416 (the "Types.Ok as
-- ->" alternative and the branches of the inner case on asSolution)
-- are missing from this extract.
1413 case Cluster.asSolution as of
1417 -- | Checks that allocation obeys minimum and maximum instance
1418 -- policies. The unittest generates a random node, duplicates it count
1419 -- times, and generates a random instance that can be allocated on
1420 -- this mini-cluster; it then checks that after applying a policy that
1421 -- the instance doesn't fits, the allocation fails.
1422 prop_ClusterAllocPolicy node =
1423 -- rqn is the required nodes (1 or 2)
1424 forAll (choose (1, 2)) $ \rqn ->
1425 forAll (choose (5, 20)) $ \count ->
1426 forAll (arbitrary `suchThat` (canAllocOn (makeSmallCluster node count) rqn))
-- NOTE(review): original line 1427 (presumably the "$ \inst ->"
-- continuation of the forAll above) is missing from this extract.
1428 forAll (arbitrary `suchThat` (isFailure .
1429 Instance.instMatchesPolicy inst)) $ \ipol ->
-- With the non-matching policy installed on every node, allocation of
-- the previously-allocatable instance must now fail.
1430 let node' = Node.setPolicy ipol node
1431 nl = makeSmallCluster node' count
1432 in not $ canAllocOn nl rqn inst
1437 , 'prop_ClusterAlloc_sane
1438 , 'prop_ClusterCanTieredAlloc
1439 , 'prop_ClusterAllocRelocate
1440 , 'prop_ClusterAllocEvacuate
1441 , 'prop_ClusterAllocChangeGroup
1442 , 'prop_ClusterAllocBalance
1443 , 'prop_ClusterCheckConsistency
1444 , 'prop_ClusterSplitCluster
1445 , 'prop_ClusterAllocPolicy
-- | Round-tripping an opcode through JSON must be the identity.
prop_OpCodes_serialization :: OpCodes.OpCode -> Property
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok decoded -> op ==? decoded
1458 [ 'prop_OpCodes_serialization ]
-- | Round-tripping a (queued) opcode status through JSON must be the
-- identity.
prop_OpStatus_serialization :: Jobs.OpStatus -> Property
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok decoded -> os ==? decoded
-- | Round-tripping a job status through JSON must be the identity.
prop_JobStatus_serialization :: Jobs.JobStatus -> Property
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok decoded -> js ==? decoded
1476 [ 'prop_OpStatus_serialization
1477 , 'prop_JobStatus_serialization
-- | 'Loader.lookupNode' must agree with a plain map lookup.
prop_Loader_lookupNode ktn inst node =
  let nodemap = Data.Map.fromList ktn
  in Loader.lookupNode nodemap inst node ==? Data.Map.lookup node nodemap
-- | 'Loader.lookupInstance' must agree with a plain map lookup.
prop_Loader_lookupInstance kti inst =
  let instmap = Data.Map.fromList kti
  in Loader.lookupInstance instmap inst ==? Data.Map.lookup inst instmap
1490 prop_Loader_assignIndices =
1491 -- generate nodes with unique names
-- NOTE(review): original lines 1493, 1496, 1500 and 1502-1503 are
-- missing from this extract (parts of the suchThat predicate, the
-- "let ... =" binding of the assignIndices result, and the branches of
-- the final consistency check).
1492 forAll (arbitrary `suchThat`
1494 let names = map Node.name nodes
1495 in length names == length (nub names))) $ \nodes ->
1497 Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-- Both the name association and the container must cover every node,
-- and indices must be dense starting at 0 for non-empty input.
1498 in Data.Map.size nassoc == length nodes &&
1499 Container.size kt == length nodes &&
1501 then maximum (IntMap.keys kt) == length nodes - 1
1504 -- | Checks that the number of primary instances recorded on the nodes
-- (continuation of this comment, original line 1505, is missing from
-- this extract; the check below asserts zero recorded instances for a
-- cluster merged with no instance data).
1506 prop_Loader_mergeData ns =
1507 let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
1508 in case Loader.mergeData [] [] [] []
1509 (Loader.emptyCluster {Loader.cdNodes = na}) of
1510 Types.Bad _ -> False
1511 Types.Ok (Loader.ClusterData _ nl il _ _) ->
1512 let nodes = Container.elems nl
1513 instances = Container.elems il
1514 in (sum . map (length . Node.pList)) nodes == 0 &&
-- NOTE(review): original lines 1515-1516 (the remaining conjuncts,
-- presumably about secondary lists and the instance list being empty)
-- are missing from this extract.
-- | Comparing a name component against itself must yield an exact
-- match.
prop_Loader_compareNameComponent_equal :: String -> Bool
prop_Loader_compareNameComponent_equal s =
  let expected = Loader.LookupResult Loader.ExactMatch s
  in Loader.compareNameComponent s s == expected
-- | Comparing a dotted name against its first component must yield a
-- partial match.
prop_Loader_compareNameComponent_prefix :: NonEmptyList Char -> String -> Bool
prop_Loader_compareNameComponent_prefix (NonEmpty s1) s2 =
  let dotted = s1 ++ "." ++ s2
  in Loader.compareNameComponent dotted s1 ==
     Loader.LookupResult Loader.PartialMatch s1
1530 [ 'prop_Loader_lookupNode
1531 , 'prop_Loader_lookupInstance
1532 , 'prop_Loader_assignIndices
1533 , 'prop_Loader_mergeData
1534 , 'prop_Loader_compareNameComponent_equal
1535 , 'prop_Loader_compareNameComponent_prefix
-- | Round-tripping an allocation policy through JSON must be the
-- identity.
prop_Types_AllocPolicy_serialisation :: Types.AllocPolicy -> Property
prop_Types_AllocPolicy_serialisation apol =
  case J.readJSON (J.showJSON apol) of
    J.Ok decoded -> decoded ==? apol
    J.Error msg -> failTest $ "Failed to deserialise: " ++ msg
-- Round-trip a disk template through JSON serialisation.
1546 prop_Types_DiskTemplate_serialisation dt =
1547 case J.readJSON (J.showJSON dt) of
-- NOTE(review): original line 1548 (the "J.Ok ..." success
-- alternative) is missing from this extract.
1549 J.Error s -> failTest $ "Failed to deserialise: " ++ s
1550 where _types = dt::Types.DiskTemplate
-- | Round-tripping an instance spec through JSON must be the identity.
prop_Types_ISpec_serialisation :: Types.ISpec -> Property
prop_Types_ISpec_serialisation ispec =
  case J.readJSON (J.showJSON ispec) of
    J.Ok decoded -> decoded ==? ispec
    J.Error msg -> failTest $ "Failed to deserialise: " ++ msg
-- | Round-tripping an instance policy through JSON must be the
-- identity.
prop_Types_IPolicy_serialisation :: Types.IPolicy -> Property
prop_Types_IPolicy_serialisation ipol =
  case J.readJSON (J.showJSON ipol) of
    J.Ok decoded -> decoded ==? ipol
    J.Error msg -> failTest $ "Failed to deserialise: " ++ msg
-- Round-trip an evacuation mode through JSON serialisation.
1564 prop_Types_EvacMode_serialisation em =
1565 case J.readJSON (J.showJSON em) of
-- NOTE(review): original line 1566 (the "J.Ok ..." success
-- alternative) is missing from this extract.
1567 J.Error s -> failTest $ "Failed to deserialise: " ++ s
1568 where _types = em::Types.EvacMode
-- opToResult must map OpFail to Bad and OpGood to Ok with the same
-- payload.
1570 prop_Types_opToResult op =
-- NOTE(review): original line 1571 (the "case op of" scrutinee line)
-- is missing from this extract.
1572 Types.OpFail _ -> Types.isBad r
1573 Types.OpGood v -> case r of
1574 Types.Bad _ -> False
1575 Types.Ok v' -> v == v'
1576 where r = Types.opToResult op
1577 _types = op::Types.OpResult Int
-- eitherToResult must map Left to Bad and Right to Ok with the same
-- payload.
1579 prop_Types_eitherToResult ei =
-- NOTE(review): original line 1580 (the "case ei of" scrutinee line)
-- is missing from this extract.
1581 Left _ -> Types.isBad r
1582 Right v -> case r of
1583 Types.Bad _ -> False
1584 Types.Ok v' -> v == v'
1585 where r = Types.eitherToResult ei
1586 _types = ei::Either String Int
1589 [ 'prop_Types_AllocPolicy_serialisation
1590 , 'prop_Types_DiskTemplate_serialisation
1591 , 'prop_Types_ISpec_serialisation
1592 , 'prop_Types_IPolicy_serialisation
1593 , 'prop_Types_EvacMode_serialisation
1594 , 'prop_Types_opToResult
1595 , 'prop_Types_eitherToResult
-- | A "disk,mem,cpu" spec string must parse back into the
-- corresponding 'Types.RSpec'.
prop_CLI_parseISpec descr dsk mem cpu =
  let str = intercalate "," $ map show [dsk, mem, cpu]
  in CLI.parseISpecString descr str ==? Types.Ok (Types.RSpec cpu mem dsk)
1605 -- | Test parsing failure due to wrong section count.
1606 prop_CLI_parseISpecFail descr =
-- Any comma-separated list with a section count other than 3 must be
-- rejected by the spec parser.
1607 forAll (choose (0,100) `suchThat` ((/=) 3)) $ \nelems ->
1608 forAll (replicateM nelems arbitrary) $ \values ->
1609 let str = intercalate "," $ map show (values::[Int])
1610 in case CLI.parseISpecString descr str of
1611 Types.Ok v -> failTest $ "Expected failure, got " ++ show v
-- NOTE(review): original lines 1612-1613 (the failure alternative,
-- presumably "_ -> property True") are missing from this extract.
1614 -- | Test parseYesNo.
-- Nothing must yield the default; "yes"/"no" must map to True/False;
-- any other string must be a Bad result.
1615 prop_CLI_parseYesNo def testval val =
1616 forAll (elements [val, "yes", "no"]) $ \actual_val ->
-- NOTE(review): original line 1617 (the "if testval"-style condition
-- selecting between the Nothing and Just branches) is missing from
-- this extract.
1618 then CLI.parseYesNo def Nothing ==? Types.Ok def
1619 else let result = CLI.parseYesNo def (Just actual_val)
1620 in if actual_val `elem` ["yes", "no"]
1621 then result ==? Types.Ok (actual_val == "yes")
1622 else property $ Types.isBad result
1624 -- | Helper to check for correct parsing of string arg.
-- Passes "--<long-option>=<val>" through the option parser and checks
-- the accessor 'fn' sees exactly 'val'.
1625 checkStringArg val (opt, fn) =
1626 let GetOpt.Option _ longs _ _ = opt
-- NOTE(review): original lines 1627 and 1629 (the "case longs of"
-- scrutinee and the non-empty alternative binding cmdarg) are missing
-- from this extract.
1628 [] -> failTest "no long options?"
1630 case CLI.parseOptsInner ["--" ++ cmdarg ++ "=" ++ val] "prog" [opt] of
1631 Left e -> failTest $ "Failed to parse option: " ++ show e
1632 Right (options, _) -> fn options ==? Just val
1634 -- | Test a few string arguments.
-- Each pair is (option definition, accessor on the parsed options).
1635 prop_CLI_StringArg argument =
1636 let args = [ (CLI.oDataFile, CLI.optDataFile)
1637 , (CLI.oDynuFile, CLI.optDynuFile)
1638 , (CLI.oSaveCluster, CLI.optSaveCluster)
1639 , (CLI.oReplay, CLI.optReplay)
1640 , (CLI.oPrintCommands, CLI.optShowCmds)
1641 , (CLI.oLuxiSocket, CLI.optLuxi)
-- NOTE(review): original line 1642 (the closing "]" of the list) is
-- missing from this extract.
1643 in conjoin $ map (checkStringArg argument) args
1645 -- | Helper to test that a given option is accepted OK with quick exit.
-- Exit code 0 from parseOptsInner signals a successful early exit
-- (e.g. --help/--version); anything else is a failure.
1646 checkEarlyExit name options param =
1647 case CLI.parseOptsInner [param] name options of
1648 Left (code, _) -> if code == 0
-- NOTE(review): original line 1649 (the "then ..." branch, presumably
-- "then property True") is missing from this extract.
1650 else failTest $ "Program " ++ name ++
1651 " returns invalid code " ++ show code ++
1652 " for option " ++ param
1653 _ -> failTest $ "Program " ++ name ++ " doesn't consider option " ++
1654 param ++ " as early exit one"
1656 -- | Test that all binaries support some common options. There is
1657 -- nothing actually random about this test...
-- NOTE(review): the property's head (original line 1658) is missing
-- from this extract.
1659 let params = ["-h", "--help", "-V", "--version"]
1660 opts = map (\(name, (_, o)) -> (name, o)) Program.personalities
1661 -- apply checkEarlyExit across the cartesian product of params and opts
1662 in conjoin [checkEarlyExit n o p | p <- params, (n, o) <- opts]
1665 [ 'prop_CLI_parseISpec
1666 , 'prop_CLI_parseISpecFail
1667 , 'prop_CLI_parseYesNo
1668 , 'prop_CLI_StringArg
-- | 'JSON.toArray' on a JSON array must unwrap exactly its elements.
prop_JSON_toArray :: [Int] -> Property
prop_JSON_toArray intarr =
  let jsvals = map J.showJSON intarr
  in case JSON.toArray (J.JSArray jsvals) of
       Types.Ok unwrapped -> jsvals ==? unwrapped
       Types.Bad err -> failTest $ "Failed to parse array: " ++ err
-- | 'JSON.toArray' on a non-array JSON value must fail.
prop_JSON_toArrayFail :: Int -> String -> Bool -> Property
prop_JSON_toArrayFail i s b =
  -- poor man's instance Arbitrary JSValue
  forAll (elements [J.showJSON i, J.showJSON s, J.showJSON b]) $ \scalar ->
  case JSON.toArray scalar of
    Types.Bad _ -> property True
    Types.Ok result -> failTest $ "Unexpected parse, got " ++ show result
1690 [ 'prop_JSON_toArray
1691 , 'prop_JSON_toArrayFail
-- Both Luxi enumerations are bounded, so an arbitrary value is simply
-- a uniform pick over the whole range.
1696 instance Arbitrary Luxi.LuxiReq where
1697 arbitrary = elements [minBound..maxBound]
1699 instance Arbitrary Luxi.QrViaLuxi where
1700 arbitrary = elements [minBound..maxBound]
-- Generate a LuxiOp by first picking a request type and then filling
-- in matching arguments (FQDNs, field lists, opcodes, ...).
1702 instance Arbitrary Luxi.LuxiOp where
-- NOTE(review): original lines 1703-1705 are missing from this
-- extract (presumably "arbitrary = do", the request-type pick and the
-- "case ... of" head), as is line 1727 (the second argument of
-- AutoArchiveJobs).
1706 Luxi.ReqQuery -> Luxi.Query <$> arbitrary <*> getFields <*> arbitrary
1707 Luxi.ReqQueryNodes -> Luxi.QueryNodes <$> (listOf getFQDN) <*>
1708 getFields <*> arbitrary
1709 Luxi.ReqQueryGroups -> Luxi.QueryGroups <$> arbitrary <*>
1710 arbitrary <*> arbitrary
1711 Luxi.ReqQueryInstances -> Luxi.QueryInstances <$> (listOf getFQDN) <*>
1712 getFields <*> arbitrary
1713 Luxi.ReqQueryJobs -> Luxi.QueryJobs <$> arbitrary <*> getFields
1714 Luxi.ReqQueryExports -> Luxi.QueryExports <$>
1715 (listOf getFQDN) <*> arbitrary
1716 Luxi.ReqQueryConfigValues -> Luxi.QueryConfigValues <$> getFields
1717 Luxi.ReqQueryClusterInfo -> pure Luxi.QueryClusterInfo
1718 Luxi.ReqQueryTags -> Luxi.QueryTags <$> getName <*> getFQDN
1719 Luxi.ReqSubmitJob -> Luxi.SubmitJob <$> (resize maxOpCodes arbitrary)
1720 Luxi.ReqSubmitManyJobs -> Luxi.SubmitManyJobs <$>
1721 (resize maxOpCodes arbitrary)
1722 Luxi.ReqWaitForJobChange -> Luxi.WaitForJobChange <$> arbitrary <*>
1723 getFields <*> pure J.JSNull <*>
1724 pure J.JSNull <*> arbitrary
1725 Luxi.ReqArchiveJob -> Luxi.ArchiveJob <$> arbitrary
1726 Luxi.ReqAutoArchiveJobs -> Luxi.AutoArchiveJobs <$> arbitrary <*>
1728 Luxi.ReqCancelJob -> Luxi.CancelJob <$> arbitrary
1729 Luxi.ReqSetDrainFlag -> Luxi.SetDrainFlag <$> arbitrary
1730 Luxi.ReqSetWatcherPause -> Luxi.SetWatcherPause <$> arbitrary
-- | Building a Luxi call, then validating and decoding it, must give
-- back the original operation.
prop_Luxi_CallEncoding :: Luxi.LuxiOp -> Property
prop_Luxi_CallEncoding op =
  let roundtrip = Luxi.validateCall (Luxi.buildCall op) >>= Luxi.decodeCall
  in roundtrip ==? Types.Ok op
1738 [ 'prop_Luxi_CallEncoding
-- SSKey is a bounded enumeration, so pick uniformly over its range.
1743 instance Arbitrary Ssconf.SSKey where
1744 arbitrary = elements [minBound..maxBound]
-- | Every ssconf key's filename must start with the ssconf prefix.
prop_Ssconf_filename key =
  printTestCase "Key doesn't start with correct prefix" $
  isPrefixOf Ssconf.sSFilePrefix (Ssconf.keyToFilename (Just "") key)
1751 [ 'prop_Ssconf_filename