root / htools / Ganeti / HTools / QC.hs @ a7ea861a
History | View | Annotate | Download (55.6 kB)
1 |
{-# LANGUAGE TemplateHaskell #-} |
---|---|
2 |
|
3 |
{-| Unittests for ganeti-htools. |
4 |
|
5 |
-} |
6 |
|
7 |
{- |
8 |
|
9 |
Copyright (C) 2009, 2010, 2011, 2012 Google Inc. |
10 |
|
11 |
This program is free software; you can redistribute it and/or modify |
12 |
it under the terms of the GNU General Public License as published by |
13 |
the Free Software Foundation; either version 2 of the License, or |
14 |
(at your option) any later version. |
15 |
|
16 |
This program is distributed in the hope that it will be useful, but |
17 |
WITHOUT ANY WARRANTY; without even the implied warranty of |
18 |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
19 |
General Public License for more details. |
20 |
|
21 |
You should have received a copy of the GNU General Public License |
22 |
along with this program; if not, write to the Free Software |
23 |
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
24 |
02110-1301, USA. |
25 |
|
26 |
-} |
27 |
|
28 |
module Ganeti.HTools.QC |
29 |
( testUtils |
30 |
, testPeerMap |
31 |
, testContainer |
32 |
, testInstance |
33 |
, testNode |
34 |
, testText |
35 |
, testSimu |
36 |
, testOpCodes |
37 |
, testJobs |
38 |
, testCluster |
39 |
, testLoader |
40 |
, testTypes |
41 |
, testCLI |
42 |
) where |
43 |
|
44 |
import Test.QuickCheck |
45 |
import Text.Printf (printf) |
46 |
import Data.List (findIndex, intercalate, nub, isPrefixOf) |
47 |
import qualified Data.Set as Set |
48 |
import Data.Maybe |
49 |
import Control.Monad |
50 |
import qualified Text.JSON as J |
51 |
import qualified Data.Map |
52 |
import qualified Data.IntMap as IntMap |
53 |
import qualified Ganeti.OpCodes as OpCodes |
54 |
import qualified Ganeti.Jobs as Jobs |
55 |
import qualified Ganeti.Luxi |
56 |
import qualified Ganeti.HTools.CLI as CLI |
57 |
import qualified Ganeti.HTools.Cluster as Cluster |
58 |
import qualified Ganeti.HTools.Container as Container |
59 |
import qualified Ganeti.HTools.ExtLoader |
60 |
import qualified Ganeti.HTools.IAlloc as IAlloc |
61 |
import qualified Ganeti.HTools.Instance as Instance |
62 |
import qualified Ganeti.HTools.JSON as JSON |
63 |
import qualified Ganeti.HTools.Loader as Loader |
64 |
import qualified Ganeti.HTools.Luxi |
65 |
import qualified Ganeti.HTools.Node as Node |
66 |
import qualified Ganeti.HTools.Group as Group |
67 |
import qualified Ganeti.HTools.PeerMap as PeerMap |
68 |
import qualified Ganeti.HTools.Rapi |
69 |
import qualified Ganeti.HTools.Simu as Simu |
70 |
import qualified Ganeti.HTools.Text as Text |
71 |
import qualified Ganeti.HTools.Types as Types |
72 |
import qualified Ganeti.HTools.Utils as Utils |
73 |
import qualified Ganeti.HTools.Version |
74 |
import qualified Ganeti.Constants as C |
75 |
|
76 |
import qualified Ganeti.HTools.Program.Hail |
77 |
import qualified Ganeti.HTools.Program.Hbal |
78 |
import qualified Ganeti.HTools.Program.Hscan |
79 |
import qualified Ganeti.HTools.Program.Hspace |
80 |
|
81 |
import Ganeti.HTools.QCHelper (testSuite) |
82 |
|
83 |
-- * Constants

-- | Maximum memory (1TiB, somewhat random value); units appear to be
-- MiB, matching the "1TiB" label.
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024

-- | All disk templates (used later in generator upper bounds).
allDiskTemplates :: [Types.DiskTemplate]
allDiskTemplates = [minBound..maxBound]
100 |
|
101 |
-- | Null iPolicy, and by null we mean very liberal: the minimum spec
-- accepts anything (all zeroes), the maximum spec is bounded only by
-- 'maxBound' and the cluster-wide disk/NIC limits, so this policy
-- never interferes with the properties under test.
--
-- FIX: added the previously-missing top-level type signature
-- (silences -Wmissing-signatures and documents the type).
nullIPolicy :: Types.IPolicy
nullIPolicy = Types.IPolicy
  { Types.iPolicyMinSpec = Types.ISpec { Types.iSpecMemorySize = 0
                                       , Types.iSpecCpuCount = 0
                                       , Types.iSpecDiskSize = 0
                                       , Types.iSpecDiskCount = 0
                                       , Types.iSpecNicCount = 0
                                       }
  , Types.iPolicyMaxSpec = Types.ISpec { Types.iSpecMemorySize = maxBound
                                       , Types.iSpecCpuCount = maxBound
                                       , Types.iSpecDiskSize = maxBound
                                       , Types.iSpecDiskCount = C.maxDisks
                                       , Types.iSpecNicCount = C.maxNics
                                       }
  , Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
                                       , Types.iSpecCpuCount = Types.unitCpu
                                       , Types.iSpecDiskSize = Types.unitDsk
                                       , Types.iSpecDiskCount = 1
                                       , Types.iSpecNicCount = 1
                                       }
  , Types.iPolicyDiskTemplates = [Types.DTDrbd8, Types.DTPlain]
  }
123 |
|
124 |
|
125 |
-- | The default node group used throughout the tests, with index 0.
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
             Group.create "default" Types.defaultGroupID Types.AllocPreferred
                    nullIPolicy

-- | A group list containing only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Maps the 'defGroup' UUID to its index, as the loaders expect.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
135 |
|
136 |
-- * Helper functions

-- | Simple checker for whether an 'Types.OpResult' is a failure.
isFailure :: Types.OpResult a -> Bool
isFailure result = case result of
                     Types.OpFail _ -> True
                     _              -> False
142 |
|
143 |
-- | Equality check that annotates a failure with both operands, so
-- QuickCheck counterexamples show the mismatching values.
(==?) :: (Show a, Eq a) => a -> a -> Property
(==?) x y =
  let annotation = "Expected equality, but '" ++
                   show x ++ "' /= '" ++ show y ++ "'"
  in printTestCase annotation (x == y)
infix 3 ==?

-- | Show a message and fail the test.
failTest :: String -> Property
failTest = flip printTestCase False
153 |
|
154 |
-- | Update an instance so that each of its resources is at most half
-- of the node's available amount (so it is guaranteed to fit).
--
-- FIX: added the previously-missing top-level type signature.
setInstanceSmallerThanNode :: Node.Node -> Instance.Instance
                           -> Instance.Instance
setInstanceSmallerThanNode node inst =
  inst { Instance.mem = Node.availMem node `div` 2
       , Instance.dsk = Node.availDisk node `div` 2
       , Instance.vcpus = Node.availCpu node `div` 2
       }
160 |
|
161 |
-- | Create an instance given its mem/disk/cpu spec; all other fields
-- (name, status, node indices, disk template) are fixed dummies.
--
-- FIX: added the previously-missing top-level type signature
-- (mem/dsk/vcpus are Ints, per every call site in this file).
createInstance :: Int -> Int -> Int -> Instance.Instance
createInstance mem dsk vcpus =
  Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
    Types.DTDrbd8
165 |
|
166 |
-- | Create a small cluster by cloning a node spec @count@ times,
-- giving each clone a unique "-N" name/alias suffix, and indexing the
-- result via the loader.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
  let base_name  = Node.name node
      base_alias = Node.alias node
      clone idx = node { Node.name  = base_name ++ "-" ++ show idx
                       , Node.alias = base_alias ++ "-" ++ show idx }
      -- each clone starts with an empty peer map
      prepared n = Node.buildPeers n Container.empty
      assoc = [ (Node.name n, prepared n) | n <- map clone [1..count] ]
      (_, node_list) = Loader.assignIndices assoc
  in node_list
178 |
|
179 |
-- | Make a small cluster with no instances, plus an instance shrunk
-- to fit the given node.
makeSmallEmptyCluster :: Node.Node -> Int -> Instance.Instance
                      -> (Node.List, Instance.List, Instance.Instance)
makeSmallEmptyCluster node count inst =
  let nodes = makeSmallCluster node count
      fitting = setInstanceSmallerThanNode node inst
  in (nodes, Container.empty, fitting)
185 |
|
186 |
-- | Checks if a node is "big" enough, i.e. has more than @size@
-- standard units of each resource still available.
isNodeBig :: Int -> Node.Node -> Bool
isNodeBig size node =
  and [ Node.availDisk node > size * Types.unitDsk
      , Node.availMem node > size * Types.unitMem
      , Node.availCpu node > size * Types.unitCpu
      ]
191 |
|
192 |
-- | Whether a balancing step is possible on the given table with the
-- given move flags (thresholds fixed at 0).
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac =
  isJust (Cluster.tryBalance tbl dm im evac 0 0)
194 |
|
195 |
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      -- the fresh instance index: one past the current maximum, or 0
      -- for an empty instance list
      maxiidx = if Container.null il
                  then 0
                  else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      -- register the instance on both its primary and secondary node
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
213 |
|
214 |
-- | Generates a list of a given size with non-duplicate elements.
--
-- Each new element is drawn until it differs from everything
-- generated so far; note that 'suchThat' can loop for types with
-- fewer than @cnt@ distinct values.
genUniquesList :: (Eq a, Arbitrary a) => Int -> Gen [a]
genUniquesList cnt =
  foldM (\lst _ -> do
           newelem <- arbitrary `suchThat` (`notElem` lst)
           return (newelem:lst)) [] [1..cnt]
220 |
|
221 |
-- * Arbitrary instances

-- | Defines a DNS name character (wrapper so we can give it a
-- dedicated 'Arbitrary' instance).
newtype DNSChar = DNSChar { dnsGetChar::Char }

instance Arbitrary DNSChar where
  arbitrary = do
    -- only characters valid inside a DNS label (plus underscore)
    x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
    return (DNSChar x)

-- | Generates a single name component.
-- NOTE(review): real DNS labels max out at 63 characters; the bound
-- of 64 here is close enough for test purposes.
getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)

-- | Generates an entire FQDN, as 1-4 dot-joined name components.
getFQDN :: Gen String
getFQDN = do
  ncomps <- choose (1, 4)
  names <- mapM (const getName) [1..ncomps::Int]
  return $ intercalate "." names
244 |
|
245 |
-- | Defines a tag type (wrapper for a dedicated 'Arbitrary' instance).
newtype TagChar = TagChar { tagGetChar :: Char }

-- | All valid tag chars. This doesn't need to match _exactly_
-- Ganeti's own tag regex, just enough for it to be close.
tagChar :: [Char]
tagChar = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] ++ ".+*/:@-"

instance Arbitrary TagChar where
  arbitrary = do
    c <- elements tagChar
    return (TagChar c)

-- | Generates a tag (a short non-empty string of valid tag chars).
genTag :: Gen [TagChar]
genTag = do
  -- the correct value would be C.maxTagLen, but that's way too
  -- verbose in unittests, and at the moment I don't see any possible
  -- bugs with longer tags and the way we use tags in htools
  n <- choose (1, 10)
  vector n

-- | Generates a list of tags (correctly upper bounded).
genTags :: Gen [String]
genTags = do
  -- the correct value would be C.maxTagsPerObj, but per the comment
  -- in genTag, we don't use tags enough in htools to warrant testing
  -- such big values
  n <- choose (0, 10::Int)
  tags <- mapM (const genTag) [1..n]
  return $ map (map tagGetChar) tags
276 |
|
277 |
instance Arbitrary Types.InstanceStatus where
    arbitrary = elements [minBound..maxBound]

-- | Generates a random instance with the given upper bounds on
-- mem/disk/cpu; other fields (status, node indices) are arbitrary.
genInstanceSmallerThan :: Int -> Int -> Int -> Gen Instance.Instance
genInstanceSmallerThan lim_mem lim_dsk lim_cpu = do
  name <- getFQDN
  mem <- choose (0, lim_mem)
  dsk <- choose (0, lim_dsk)
  run_st <- arbitrary
  pn <- arbitrary
  sn <- arbitrary
  vcpus <- choose (0, lim_cpu)
  return $ Instance.create name mem dsk vcpus run_st [] True pn sn
         Types.DTDrbd8

-- | Generates an instance smaller than a node (at most half of each
-- of the node's available resources).
genInstanceSmallerThanNode :: Node.Node -> Gen Instance.Instance
genInstanceSmallerThanNode node =
  genInstanceSmallerThan (Node.availMem node `div` 2)
                         (Node.availDisk node `div` 2)
                         (Node.availCpu node `div` 2)

-- let's generate a random instance, bounded only by the global maxima
instance Arbitrary Instance.Instance where
  arbitrary = genInstanceSmallerThan maxMem maxDsk maxCpu
303 |
|
304 |
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let (base_mem, base_dsk, base_cpu) =
        case min_multiplier of
          Just mm -> (mm * Types.unitMem,
                      mm * Types.unitDsk,
                      mm * Types.unitCpu)
          Nothing -> (0, 0, 0)
      (top_mem, top_dsk, top_cpu) =
        case max_multiplier of
          Just mm -> (mm * Types.unitMem,
                      mm * Types.unitDsk,
                      mm * Types.unitCpu)
          Nothing -> (maxMem, maxDsk, maxCpu)
  name <- getFQDN
  -- draws are kept mutually consistent: free <= total, and the
  -- remaining quantity (mem_n, presumably node-reserved memory --
  -- TODO confirm against Node.create) fits in total - free
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
      n' = Node.setPolicy nullIPolicy n
  return $ Node.buildPeers n' Container.empty
334 |
|
335 |
-- | Helper function to generate a sane node: online, not failing
-- N+1, and with strictly positive available disk, memory and CPU.
--
-- FIX: removed the redundant 'do' block wrapping a single expression
-- (flagged by hlint as "redundant do"; behavior unchanged).
genOnlineNode :: Gen Node.Node
genOnlineNode =
  arbitrary `suchThat` (\n -> not (Node.offline n) &&
                              not (Node.failN1 n) &&
                              Node.availDisk n > 0 &&
                              Node.availMem n > 0 &&
                              Node.availCpu n > 0)

-- and a random node, with default (unbounded) sizing
instance Arbitrary Node.Node where
  arbitrary = genNode Nothing Nothing
347 |
|
348 |
-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    -- first pick one of the opcode IDs we know how to build, then
    -- construct the matching constructor with arbitrary fields
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    case op_id of
      "OP_TEST_DELAY" ->
        liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
      "OP_INSTANCE_REPLACE_DISKS" ->
        liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
      "OP_INSTANCE_FAILOVER" ->
        liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
          arbitrary
      "OP_INSTANCE_MIGRATE" ->
        liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
          arbitrary arbitrary arbitrary
      -- unreachable: every element of the list above is handled
      _ -> fail "Wrong opcode"
372 |
|
373 |
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- | A ratio drawn from the closed interval [0, 1].
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
  arbitrary = do
    v <- choose (0, 1)
    return $ SmallRatio v

instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.FailMode where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.EvacMode where
  arbitrary = elements [minBound..maxBound]

-- a fair coin decides between OpGood and OpFail
instance Arbitrary a => Arbitrary (Types.OpResult a) where
  arbitrary = arbitrary >>= \c ->
              if c
                then liftM Types.OpGood arbitrary
                else liftM Types.OpFail arbitrary
402 |
|
403 |
instance Arbitrary Types.ISpec where
  arbitrary = do
    -- every spec component is drawn non-negative
    mem_s <- arbitrary::Gen (NonNegative Int)
    dsk_c <- arbitrary::Gen (NonNegative Int)
    dsk_s <- arbitrary::Gen (NonNegative Int)
    cpu_c <- arbitrary::Gen (NonNegative Int)
    nic_c <- arbitrary::Gen (NonNegative Int)
    return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
                       , Types.iSpecCpuCount = fromIntegral cpu_c
                       , Types.iSpecDiskSize = fromIntegral dsk_s
                       , Types.iSpecDiskCount = fromIntegral dsk_c
                       , Types.iSpecNicCount = fromIntegral nic_c
                       }

-- | Generates an ispec bigger than the given one (each component is
-- drawn between the corresponding component of @imin@ and maxBound).
genBiggerISpec :: Types.ISpec -> Gen Types.ISpec
genBiggerISpec imin = do
  mem_s <- choose (Types.iSpecMemorySize imin, maxBound)
  dsk_c <- choose (Types.iSpecDiskCount imin, maxBound)
  dsk_s <- choose (Types.iSpecDiskSize imin, maxBound)
  cpu_c <- choose (Types.iSpecCpuCount imin, maxBound)
  nic_c <- choose (Types.iSpecNicCount imin, maxBound)
  return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
                     , Types.iSpecCpuCount = fromIntegral cpu_c
                     , Types.iSpecDiskSize = fromIntegral dsk_s
                     , Types.iSpecDiskCount = fromIntegral dsk_c
                     , Types.iSpecNicCount = fromIntegral nic_c
                     }
431 |
|
432 |
instance Arbitrary Types.IPolicy where
  arbitrary = do
    -- generate min <= std <= max by growing each spec from the
    -- previous one via genBiggerISpec
    imin <- arbitrary
    istd <- genBiggerISpec imin
    imax <- genBiggerISpec istd
    num_tmpl <- choose (0, length allDiskTemplates)
    dts <- genUniquesList num_tmpl
    return Types.IPolicy { Types.iPolicyMinSpec = imin
                         , Types.iPolicyStdSpec = istd
                         , Types.iPolicyMaxSpec = imax
                         , Types.iPolicyDiskTemplates = dts
                         }
444 |
|
445 |
-- * Actual tests

-- ** Utils tests

-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
  forAll (arbitrary `suchThat`
          (\l -> l /= [""] && all (notElem ',') l )) $ \lst ->
  Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst

-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin s =
  Utils.commaJoin (Utils.sepSplit ',' s) ==? s
459 |
|
460 |
-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
  -- a missing key will be returned with the default
  JSON.fromObjWithDefault [] random_key def_value == Just def_value &&
  -- a found key will be returned as is, not with default (hence the
  -- deliberately different default of def_value+1)
  JSON.fromObjWithDefault [(random_key, J.showJSON def_value)]
       random_key (def_value+1) == Just def_value
    where _types = def_value :: Integer

-- | Test that functional if' behaves like the syntactic sugar if.
prop_Utils_if'if :: Bool -> Int -> Int -> Gen Prop
prop_Utils_if'if cnd a b =
  Utils.if' cnd a b ==? if cnd then a else b
474 |
|
475 |
-- | Test basic select functionality
prop_Utils_select :: Int      -- ^ Default result
                  -> [Int]    -- ^ List of False values
                  -> [Int]    -- ^ List of True values
                  -> Gen Prop -- ^ Test result
prop_Utils_select def lst1 lst2 =
  Utils.select def (flist ++ tlist) ==? expectedresult
    where expectedresult = Utils.if' (null lst2) def (head lst2)
          flist = zip (repeat False) lst1
          tlist = zip (repeat True) lst2

-- | Test basic select functionality with undefined default: since
-- lst2 is guaranteed non-empty, the default must never be forced.
prop_Utils_select_undefd :: [Int]            -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop         -- ^ Test result
prop_Utils_select_undefd lst1 (NonEmpty lst2) =
  Utils.select undefined (flist ++ tlist) ==? head lst2
    where flist = zip (repeat False) lst1
          tlist = zip (repeat True) lst2

-- | Test basic select functionality with undefined list values:
-- nothing past the first True entry may be forced either.
prop_Utils_select_undefv :: [Int]            -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop         -- ^ Test result
prop_Utils_select_undefv lst1 (NonEmpty lst2) =
  Utils.select undefined cndlist ==? head lst2
    where flist = zip (repeat False) lst1
          tlist = zip (repeat True) lst2
          cndlist = flist ++ tlist ++ [undefined]
504 |
|
505 |
-- | parseUnit handles its suffixes: no suffix and "m" are binary
-- mebibytes, uppercase "M"/"g"/"t" style suffixes scale accordingly,
-- and unknown suffixes are rejected.
prop_Utils_parseUnit (NonNegative n) =
  Utils.parseUnit (show n) == Types.Ok n &&
  Utils.parseUnit (show n ++ "m") == Types.Ok n &&
  (case Utils.parseUnit (show n ++ "M") of
     Types.Ok m -> if n > 0
                     then m < n -- for positive values, X MB is < than X MiB
                     else m == 0 -- but for 0, 0 MB == 0 MiB
     Types.Bad _ -> False) &&
  Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
  Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
  Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
    where _types = n::Int
517 |
|
518 |
-- | Test list for the Utils module (Template Haskell splice; the
-- quoted names are collected into a runnable test group).
testSuite "Utils"
  [ 'prop_Utils_commaJoinSplit
  , 'prop_Utils_commaSplitJoin
  , 'prop_Utils_fromObjWithDefault
  , 'prop_Utils_if'if
  , 'prop_Utils_select
  , 'prop_Utils_select_undefd
  , 'prop_Utils_select_undefv
  , 'prop_Utils_parseUnit
  ]
529 |
|
530 |
-- ** PeerMap tests

-- | Make sure add is idempotent. The arbitrary map is first passed
-- through accumArray (presumably to normalise duplicate keys --
-- TODO confirm against PeerMap.accumArray).
prop_PeerMap_addIdempotent pmap key em =
  fn puniq ==? fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
  fn puniq ==? fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
  PeerMap.find key (PeerMap.remove key puniq) ==? 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
  PeerMap.find key (PeerMap.add key em puniq) ==? em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
  PeerMap.maxElem puniq ==? if null puniq then 0
                              else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

-- | List of tests for the PeerMap module.
testSuite "PeerMap"
  [ 'prop_PeerMap_addIdempotent
  , 'prop_PeerMap_removeIdempotent
  , 'prop_PeerMap_maxElem
  , 'prop_PeerMap_addFind
  , 'prop_PeerMap_findMissing
  ]
575 |
|
576 |
-- ** Container tests

-- we silence the following due to hlint bug fixed in later versions
{-# ANN prop_Container_addTwo "HLint: ignore Avoid lambda" #-}
-- | addTwo is commutative in its two key/value pairs, and idempotent.
prop_Container_addTwo cdata i1 i2 =
  fn i1 i2 cont == fn i2 i1 cont &&
  fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

-- | A node's name can be looked up via its index in the container.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) ==? Node.name fnode

-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (genUniquesList (cnt * 2)) $ \ allnames ->
  forAll (arbitrary `suchThat` (`notElem` allnames)) $ \ othername ->
  let names = zip (take cnt allnames) (drop cnt allnames)
      nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- give each generated node a unique (name, alias) pair
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     isNothing (Container.findByName nl' othername)

testSuite "Container"
  [ 'prop_Container_addTwo
  , 'prop_Container_nameOf
  , 'prop_Container_findByName
  ]
619 |
|
620 |
-- ** Instance tests

-- Simple instance tests, we only have setter/getters

-- | A freshly created instance has its name as its alias.
prop_Instance_creat inst =
  Instance.name inst ==? Instance.alias inst

prop_Instance_setIdx inst idx =
  Instance.idx (Instance.setIdx inst idx) ==? idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
  Instance.name newinst == name &&
  Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

-- | setAlias updates only the alias, leaving the name untouched.
prop_Instance_setAlias inst name =
  Instance.name newinst == Instance.name inst &&
  Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

prop_Instance_setPri inst pdx =
  Instance.pNode (Instance.setPri inst pdx) ==? pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

prop_Instance_setSec inst sdx =
  Instance.sNode (Instance.setSec inst sdx) ==? sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

prop_Instance_setBoth inst pdx sdx =
  Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx
656 |
-- | Shrinking by memory succeeds and removes exactly one memory
-- unit, provided at least two units are present.
prop_Instance_shrinkMG inst =
  Instance.mem inst >= 2 * Types.unitMem ==>
    case Instance.shrinkByType inst Types.FailMem of
      Types.Ok inst' -> Instance.mem inst' == Instance.mem inst - Types.unitMem
      _ -> False

-- | Shrinking by memory fails below two memory units.
prop_Instance_shrinkMF inst =
  forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem}
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

-- | Same as the memory pair above, for CPU.
prop_Instance_shrinkCG inst =
  Instance.vcpus inst >= 2 * Types.unitCpu ==>
    case Instance.shrinkByType inst Types.FailCPU of
      Types.Ok inst' ->
        Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
      _ -> False

prop_Instance_shrinkCF inst =
  forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

-- | Same as the memory pair above, for disk.
prop_Instance_shrinkDG inst =
  Instance.dsk inst >= 2 * Types.unitDsk ==>
    case Instance.shrinkByType inst Types.FailDisk of
      Types.Ok inst' ->
        Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
      _ -> False

prop_Instance_shrinkDF inst =
  forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk

-- | setMovable round-trips through the movable getter.
prop_Instance_setMovable inst m =
  Instance.movable inst' ==? m
    where inst' = Instance.setMovable inst m
695 |
-- | Test list for the Instance module.
testSuite "Instance"
  [ 'prop_Instance_creat
  , 'prop_Instance_setIdx
  , 'prop_Instance_setName
  , 'prop_Instance_setAlias
  , 'prop_Instance_setPri
  , 'prop_Instance_setSec
  , 'prop_Instance_setBoth
  , 'prop_Instance_shrinkMG
  , 'prop_Instance_shrinkMF
  , 'prop_Instance_shrinkCG
  , 'prop_Instance_shrinkCF
  , 'prop_Instance_shrinkDG
  , 'prop_Instance_shrinkDF
  , 'prop_Instance_setMovable
  ]
711 |
|
712 |
-- ** Backends

-- *** Text backend tests

-- Instance text loader tests

-- | Loading an instance from well-formed text fields yields back the
-- same values, while a deliberately malformed row (only nine fields,
-- secondary node equal to the primary) must fail.
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal dt =
  pnode /= snode && pdx /= sdx ==>
  let vcpus_s = show vcpus
      dsk_s = show dsk
      mem_s = show mem
      status_s = Types.instanceStatusToRaw status
      -- with an empty secondary name, only the primary is resolvable
      ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
      nl = Data.Map.fromList ndx
      tags = ""
      sbal = if autobal then "Y" else "N"
      sdt = Types.diskTemplateToRaw dt
      inst = Text.loadInst nl
             [name, mem_s, dsk_s, vcpus_s, status_s,
              sbal, pnode, snode, sdt, tags]
      -- malformed on purpose: nine fields, duplicated primary node
      fail1 = Text.loadInst nl
              [name, mem_s, dsk_s, vcpus_s, status_s,
               sbal, pnode, pnode, tags]
      _types = ( name::String, mem::Int, dsk::Int
               , vcpus::Int, status::Types.InstanceStatus
               , snode::String
               , autobal::Bool)
  in case inst of
       Types.Bad msg -> failTest $ "Failed to load instance: " ++ msg
       Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
                                        \ loading the instance" $
                          Instance.name i == name &&
                          Instance.vcpus i == vcpus &&
                          Instance.mem i == mem &&
                          Instance.pNode i == pdx &&
                          Instance.sNode i == (if null snode
                                                 then Node.noSecondary
                                                 else sdx) &&
                          Instance.autoBalance i == autobal &&
                          Types.isBad fail1
757 |
-- | Loading instance data with a wrong field count (anything but 10)
-- must fail with the expected error message prefix.
prop_Text_Load_InstanceFail ktn fields =
  length fields /= 10 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> failTest "Managed to load instance from invalid data"
      Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                       "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn
764 |
|
765 |
-- | Node fields round-trip through loadNode; negative numeric values
-- are serialised as "?", and the test expects any such value (or an
-- explicit offline flag) to yield an offline node.
prop_Text_Load_Node name tm nm fm td fd tc fo =
  let conv v = if v < 0
                 then "?"
                 else show v
      tm_s = conv tm
      nm_s = conv nm
      fm_s = conv fm
      td_s = conv td
      fd_s = conv fd
      tc_s = conv tc
      fo_s = if fo
               then "Y"
               else "N"
      any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
      gid = Group.uuid defGroup
  in case Text.loadNode defGroupAssoc
       [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
       Nothing -> False
       Just (name', node) ->
         if fo || any_broken
           then Node.offline node
           else Node.name node == name' && name' == name &&
                Node.alias node == name &&
                Node.tMem node == fromIntegral tm &&
                Node.nMem node == nm &&
                Node.fMem node == fm &&
                Node.tDsk node == fromIntegral td &&
                Node.fDsk node == fd &&
                Node.tCpu node == fromIntegral tc
795 |
-- | Loading a node with the wrong number of fields must fail.
-- NOTE(review): prop_Text_Load_Node above passes 9 fields (including
-- the group id); confirm whether 8 is still the right boundary here
-- or whether loadNode accepts both 8- and 9-field rows.
prop_Text_Load_NodeFail fields =
  length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
797 |
|
798 |
-- | Serialising a node and loading it back yields the same node.
prop_Text_NodeLSIdempotent node =
  (Text.loadNode defGroupAssoc.
       Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
  Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default; likewise
    -- force the node online and onto the default instance policy
    where n = node { Node.failN1 = True, Node.offline = False
                   , Node.iPolicy = Types.defIPolicy }
805 |
|
806 |
-- | An ISpec serialised and loaded back equals the original.
prop_Text_ISpecIdempotent ispec =
  case Text.loadISpec "dummy" . Utils.sepSplit ',' .
       Text.serializeISpec $ ispec of
    Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
    Types.Ok ispec' -> ispec ==? ispec'
812 |
-- | An IPolicy serialised and loaded back equals the original
-- (owner, policy) pair.
prop_Text_IPolicyIdempotent ipol =
  case Text.loadIPolicy . Utils.sepSplit '|' $
       Text.serializeIPolicy owner ipol of
    -- FIX: the failure message used to read "Failed to load ispec"
    -- (copy-pasted from the ISpec test above), pointing failures at
    -- the wrong loader; it now names the ipolicy loader
    Types.Bad msg -> failTest $ "Failed to load ipolicy: " ++ msg
    Types.Ok res -> (owner, ipol) ==? res
  where owner = "dummy"
818 |
|
819 |
-- | This property, while being in the text tests, does more than just
-- test end-to-end the serialisation and loading back workflow; it
-- also tests the Loader.mergeData and the actual
-- Cluster.iterateAlloc (for well-behaving w.r.t. instance
-- allocations, not for the business logic). As such, it's a quite
-- complex and slow test, and that's the reason we restrict it to
-- small cluster sizes.
prop_Text_CreateSerialise =
  forAll genTags $ \ctags ->
  forAll (choose (1, 2)) $ \reqnodes ->
  forAll (choose (1, 20)) $ \maxiter ->
  forAll (choose (2, 10)) $ \count ->
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  -- presumably only two-node instances are movable -- TODO confirm
  -- against Instance.setMovable call sites
  let inst' = Instance.setMovable inst (reqnodes == 2)
      nl = makeSmallCluster node count
  in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
     Cluster.iterateAlloc nl Container.empty (Just maxiter) inst' allocn [] []
     of
       Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
       Types.Ok (_, _, _, [], _) -> printTestCase
                                    "Failed to allocate: no allocations" False
       Types.Ok (_, nl', il', _, _) ->
         -- serialise the allocated cluster, then parse+merge it back
         -- and compare every component with the original
         let cdata = Loader.ClusterData defGroupList nl' il' ctags
                     Types.defIPolicy
             saved = Text.serializeCluster cdata
         in case Text.parseData saved >>= Loader.mergeData [] [] [] [] of
              Types.Bad msg -> failTest $ "Failed to load/merge: " ++ msg
              Types.Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
                ctags ==? ctags2 .&&.
                Types.defIPolicy ==? cpol2 .&&.
                il' ==? il2 .&&.
                defGroupList ==? gl2 .&&.
                nl' ==? nl2
854 |
-- | List of all the "Text" backend tests.
testSuite "Text"
  [ 'prop_Text_Load_Instance
  , 'prop_Text_Load_InstanceFail
  , 'prop_Text_Load_Node
  , 'prop_Text_Load_NodeFail
  , 'prop_Text_NodeLSIdempotent
  , 'prop_Text_ISpecIdempotent
  , 'prop_Text_IPolicyIdempotent
  , 'prop_Text_CreateSerialise
  ]
864 |
|
865 |
-- *** Simu backend

-- | Generates a tuple of specs for simulation: allocation policy
-- (both the full names and the one-letter abbreviations are
-- exercised), node count, and per-node disk, memory and cpu totals.
genSimuSpec :: Gen (String, Int, Int, Int, Int)
genSimuSpec = do
  pol <- elements [C.allocPolicyPreferred,
                   C.allocPolicyLastResort, C.allocPolicyUnallocable,
                   "p", "a", "u"]
  -- should be reasonable (nodes/group), bigger values only complicate
  -- the display of failed tests, and we don't care (in this particular
  -- test) about big node groups
  nodes <- choose (0, 20)
  dsk <- choose (0, maxDsk)
  mem <- choose (0, maxMem)
  cpu <- choose (0, maxCpu)
  return (pol, nodes, dsk, mem, cpu)
881 |
|
882 |
-- | Checks that given a set of correct specs, we can load them
-- successfully, and that at high-level the values look right.
prop_SimuLoad =
  forAll (choose (0, 10)) $ \ngroups ->
  forAll (replicateM ngroups genSimuSpec) $ \specs ->
  -- render each spec into the "policy,nodes,disk,mem,cpu" text form
  -- expected by the Simu backend
  let strspecs = map (\(p, n, d, m, c) -> printf "%s,%d,%d,%d,%d"
                                          p n d m c::String) specs
      totnodes = sum $ map (\(_, n, _, _, _) -> n) specs
      -- expected (tmem, tdsk, tcpu, fmem, fdsk) per node: fresh nodes
      -- have free == total for both memory and disk
      mdc_in = concatMap (\(_, n, d, m, c) ->
                            replicate n (fromIntegral m, fromIntegral d,
                                         fromIntegral c,
                                         fromIntegral m, fromIntegral d)) specs
  in case Simu.parseData strspecs of
       Types.Bad msg -> failTest $ "Failed to load specs: " ++ msg
       Types.Ok (Loader.ClusterData gl nl il tags ipol) ->
         let nodes = map snd $ IntMap.toAscList nl
             nidx = map Node.idx nodes
             mdc_out = map (\n -> (Node.tMem n, Node.tDsk n, Node.tCpu n,
                                   Node.fMem n, Node.fDsk n)) nodes
         in
         Container.size gl ==? ngroups .&&.
         Container.size nl ==? totnodes .&&.
         Container.size il ==? 0 .&&.
         length tags ==? 0 .&&.
         ipol ==? Types.defIPolicy .&&.
         -- node indices are assigned sequentially starting at 1
         nidx ==? [1..totnodes] .&&.
         mdc_in ==? mdc_out .&&.
         map Group.iPolicy (Container.elems gl) ==?
             replicate ngroups Types.defIPolicy
911 |
|
912 |
-- | List of all the "Simu" backend tests.
testSuite "Simu"
  [ 'prop_SimuLoad
  ]
915 |
|
916 |
-- ** Node tests

-- | Check that setting the alias preserves the node name and records
-- the new alias.
prop_Node_setAlias node name =
  Node.name newnode == Node.name node &&
  Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

-- | Check that the offline flag is stored exactly as given.
prop_Node_setOffline node status =
  Node.offline newnode ==? status
    where newnode = Node.setOffline node status

-- | Check that the xMem value is stored exactly as given.
prop_Node_setXmem node xm =
  Node.xMem newnode ==? xm
    where newnode = Node.setXmem node xm

-- | Check that the mCpu value is stored exactly as given.
prop_Node_setMcpu node mc =
  Node.mCpu newnode ==? mc
    where newnode = Node.setMcpu node mc
935 |
|
936 |
-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM node inst =
  Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
  not (Instance.instanceOffline inst) ==>
  case Node.addPri node inst'' of
    Types.OpFail Types.FailMem -> True
    _ -> False
  where _types = (node::Node.Node, inst::Instance.Instance)
        inst' = setInstanceSmallerThanNode node inst
        -- shrink the instance to fit the node, then restore the
        -- original (too large) memory so only memory can fail
        inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Check that an instance add with too high disk fails with
-- FailDisk.
prop_Node_addPriFD node inst =
  Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
  case Node.addPri node inst'' of
    Types.OpFail Types.FailDisk -> True
    _ -> False
  where _types = (node::Node.Node, inst::Instance.Instance)
        inst' = setInstanceSmallerThanNode node inst
        -- restore the original (too large) disk so only disk can fail
        inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | Check that an instance add requiring more vcpus than available
-- fails with FailCPU.
prop_Node_addPriFC (Positive extra) =
  forAll genOnlineNode $ \node ->
  forAll (arbitrary `suchThat` Instance.instanceNotOffline) $ \inst ->
  let inst' = setInstanceSmallerThanNode node inst
      -- ask for strictly more cpus than the node can offer
      inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
  in case Node.addPri node inst'' of
       Types.OpFail Types.FailCPU -> property True
       v -> failTest $ "Expected OpFail FailCPU, but got " ++ show v
965 |
|
966 |
-- | Check that an instance add (as secondary) with too high memory
-- or disk will be rejected.
prop_Node_addSec node inst pdx =
  ((Instance.mem inst >= (Node.fMem node - Node.rMem node) &&
    not (Instance.instanceOffline inst)) ||
   Instance.dsk inst >= Node.fDsk node) &&
  not (Node.failN1 node) ==>
  isFailure (Node.addSec node inst pdx)
  where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

-- | Check that an offline instance with reasonable disk size can always
-- be added.
prop_Node_addPriOffline =
  forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
  forAll (arbitrary `suchThat`
          (\ x -> (Instance.dsk x < Node.fDsk node) &&
                  Instance.instanceOffline x)) $ \inst ->
  case Node.addPri node inst of
    Types.OpGood _ -> True
    _ -> False

-- | Check that an instance forced offline and fitting on disk can
-- always be added as a secondary.
prop_Node_addSecOffline pdx =
  forAll genOnlineNode $ \node ->
  forAll (arbitrary `suchThat`
          (\ inst -> Instance.dsk inst < Node.availDisk node)) $ \inst ->
  case Node.addSec node (inst { Instance.runSt = Types.AdminOffline }) pdx of
    Types.OpGood _ -> True
    _ -> False
994 |
|
995 |
-- | Checks for memory reservation changes: adding an auto-balanced
-- secondary instance must raise rMem by exactly the instance memory,
-- a non-auto-balanced one must not change it, and removing either
-- restores the original value.
prop_Node_rMem inst =
  not (Instance.instanceOffline inst) ==>
  forAll (arbitrary `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
  -- ab = auto_balance, nb = non-auto_balance
  -- we use -1 as the primary node of the instance
  let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
      inst_ab = setInstanceSmallerThanNode node inst'
      inst_nb = inst_ab { Instance.autoBalance = False }
      -- now we have the two instances, identical except the
      -- autoBalance attribute
      orig_rmem = Node.rMem node
      inst_idx = Instance.idx inst_ab
      node_add_ab = Node.addSec node inst_ab (-1)
      node_add_nb = Node.addSec node inst_nb (-1)
      node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
      node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
  in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
       (Types.OpGood a_ab, Types.OpGood a_nb,
        Types.OpGood d_ab, Types.OpGood d_nb) ->
         printTestCase "Consistency checks failed" $
           Node.rMem a_ab > orig_rmem &&
           Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
           Node.rMem a_nb == orig_rmem &&
           Node.rMem d_ab == orig_rmem &&
           Node.rMem d_nb == orig_rmem &&
           -- this is not related to rMem, but as good a place to
           -- test as any
           inst_idx `elem` Node.sList a_ab &&
           inst_idx `notElem` Node.sList d_ab
       x -> failTest $ "Failed to add/remove instances: " ++ show x
1026 |
|
1027 |
-- | Check mdsk setting: after setting a minimum-disk ratio, the
-- derived low-disk and available-disk values stay within sane bounds.
prop_Node_setMdsk node mx =
  Node.loDsk node' >= 0 &&
  fromIntegral (Node.loDsk node') <= Node.tDsk node &&
  Node.availDisk node' >= 0 &&
  Node.availDisk node' <= Node.fDsk node' &&
  fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
  Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps

-- | Adding then deleting the same tags leaves an empty map unchanged.
prop_Node_tagMaps_idempotent tags =
  Node.delTags (Node.addTags m tags) tags ==? m
    where m = Data.Map.empty

-- | A map already holding the tags rejects re-adding each of them.
prop_Node_tagMaps_reject tags =
  not (null tags) ==>
  all (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

-- | Every default field has a known header and a known value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField

-- | Grouping nodes by group preserves the node count, groups nodes
-- only with their own group id, and yields unique group uuids.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))
1061 |
|
1062 |
-- | List of all the "Node" tests.
testSuite "Node"
  [ 'prop_Node_setAlias
  , 'prop_Node_setOffline
  , 'prop_Node_setMcpu
  , 'prop_Node_setXmem
  , 'prop_Node_addPriFM
  , 'prop_Node_addPriFD
  , 'prop_Node_addPriFC
  , 'prop_Node_addSec
  , 'prop_Node_addPriOffline
  , 'prop_Node_addSecOffline
  , 'prop_Node_rMem
  , 'prop_Node_setMdsk
  , 'prop_Node_tagMaps_idempotent
  , 'prop_Node_tagMaps_reject
  , 'prop_Node_showField
  , 'prop_Node_computeGroups
  ]
1080 |
|
1081 |
-- ** Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
  forAll (choose (1, 1024)) $ \count ->
  (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
   (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
  let fn = Node.buildPeers node Container.empty
      nlst = replicate count fn
      score = Cluster.compCVNodes nlst
  -- we can't say == 0 here as the floating point errors accumulate;
  -- this should be much lower than the default score in CLI.hs
  in score <= 1e-12

-- | Check that cluster stats are sane: allocatable disk is
-- non-negative and never exceeds free disk.
prop_CStats_sane =
  forAll (choose (1, 1024)) $ \count ->
  forAll genOnlineNode $ \node ->
  let fn = Node.buildPeers node Container.empty
      nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
      nl = Container.fromList nlst
      cstats = Cluster.totalResources nl
  in Cluster.csAdsk cstats >= 0 &&
     Cluster.csAdsk cstats <= Cluster.csFdsk cstats
1106 |
|
1107 |
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane inst =
  forAll (choose (5, 20)) $ \count ->
  forAll genOnlineNode $ \node ->
  let (nl, il, inst') = makeSmallEmptyCluster node count inst
  in case Cluster.genAllocNodes defGroupList nl 2 True >>=
          Cluster.tryAlloc nl il inst' of
       Types.Bad _ -> False
       Types.Ok as ->
         case Cluster.asSolution as of
           Nothing -> False
           Just (xnl, xi, _, cv) ->
             -- after a sane allocation no balancing move should
             -- improve the cluster score
             let il' = Container.add (Instance.idx xi) xi il
                 tbl = Cluster.Table xnl il' cv []
             in not (canBalance tbl True True False)

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc inst =
  forAll (choose (2, 5)) $ \count ->
  forAll (choose (1, 2)) $ \rqnodes ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  let nl = makeSmallCluster node count
      il = Container.empty
      allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
  in case allocnodes >>= \allocnodes' ->
          Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
       Types.Bad _ -> False
       -- at least one instance must be placed, and the returned
       -- instance list and stats must agree on the allocation count
       Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                            IntMap.size il' == length ixes &&
                                            length ixes == length cstats
1140 |
|
1141 |
-- | Helper function to create a cluster with the given range of nodes
-- and allocate an instance on it; returns the resulting node list,
-- the instance list holding only the new instance, and the instance
-- itself (with node indices filled in).
genClusterAlloc count node inst =
  let nl = makeSmallCluster node count
  in case Cluster.genAllocNodes defGroupList nl 2 True >>=
          Cluster.tryAlloc nl Container.empty inst of
       Types.Bad _ -> Types.Bad "Can't allocate"
       Types.Ok as ->
         case Cluster.asSolution as of
           Nothing -> Types.Bad "Empty solution?"
           Just (xnl, xi, _, _) ->
             let xil = Container.add (Instance.idx xi) xi Container.empty
             in Types.Ok (xnl, xil, xi)

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also relocate it.
prop_ClusterAllocRelocate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- NOTE(review): this passes 'Instance.idx inst' (the
      -- pre-allocation instance) rather than inst'; presumably the
      -- two indices coincide here -- confirm against tryAlloc
      case IAlloc.processRelocate defGroupList nl il
             (Instance.idx inst) 1 [Instance.sNode inst'] of
        Types.Ok _ -> printTestCase "??" True  -- huh, how to make
                                               -- this nicer...
        Types.Bad msg -> failTest $ "Failed to relocate: " ++ msg
1169 |
|
1170 |
-- | Helper property checker for the result of a nodeEvac or
-- changeGroup operation: no failed instances, non-empty opcode list,
-- and exactly the expected instance moved to the expected group.
check_EvacMode grp inst result =
  case result of
    Types.Bad msg -> failTest $ "Couldn't evacuate/change group:" ++ msg
    Types.Ok (_, _, es) ->
      let moved = Cluster.esMoved es
          failed = Cluster.esFailed es
          opcodes = not . null $ Cluster.esOpCodes es
      in failmsg ("'failed' not empty: " ++ show failed) (null failed) .&&.
         failmsg "'opcodes' is null" opcodes .&&.
         case moved of
           [(idx', gdx, _)] -> failmsg "invalid instance moved" (idx == idx')
                               .&&.
                               failmsg "wrong target group"
                                 (gdx == Group.idx grp)
           v -> failmsg ("invalid solution: " ++ show v) False
  where failmsg = \msg -> printTestCase ("Failed to evacuate: " ++ msg)
        idx = Instance.idx inst

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also node-evacuate it.
prop_ClusterAllocEvacuate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- exercise every evacuation mode ([minBound..maxBound])
      conjoin $ map (\mode -> check_EvacMode defGroup inst' $
                              Cluster.tryNodeEvac defGroupList nl il mode
                                [Instance.idx inst']) [minBound..maxBound]
1202 |
|
1203 |
-- | Checks that on a 4-8 node cluster with two node groups, once we
-- allocate an instance on the first node group, we can also change
-- its group.
prop_ClusterAllocChangeGroup =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- we need to add a second node group and nodes to the cluster
      let nl2 = Container.elems $ makeSmallCluster node count
          grp2 = Group.setIdx defGroup (Group.idx defGroup + 1)
          maxndx = maximum . map Node.idx $ nl2
          -- shift the new nodes' indices past the existing ones and
          -- assign them to the second group
          nl3 = map (\n -> n { Node.group = Group.idx grp2
                             , Node.idx = Node.idx n + maxndx }) nl2
          nl4 = Container.fromList . map (\n -> (Node.idx n, n)) $ nl3
          gl' = Container.add (Group.idx grp2) grp2 defGroupList
          nl' = IntMap.union nl nl4
      in check_EvacMode grp2 inst' $
         Cluster.tryChangeGroup gl' nl' il [] [Instance.idx inst']

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
  forAll (genNode (Just 5) (Just 128)) $ \node ->
  forAll (choose (3, 5)) $ \count ->
  not (Node.offline node) && not (Node.failN1 node) ==>
  let nl = makeSmallCluster node count
      -- hold back the highest-index node, allocate on the rest, then
      -- re-add it empty and expect a balancing move to exist
      (hnode, nl') = IntMap.deleteFindMax nl
      il = Container.empty
      allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
      i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
  in case allocnodes >>= \allocnodes' ->
          Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
       Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
       Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no instances"
       Types.Ok (_, xnl, il', _, _) ->
         let ynl = Container.add (Node.idx hnode) hnode xnl
             cv = Cluster.compCV ynl
             tbl = Cluster.Table ynl il' cv []
         in printTestCase "Failed to rebalance" $
            canBalance tbl True True False
1246 |
|
1247 |
-- | Checks consistency: instances spanning two groups are detected as
-- split, instances within one group (with or without a secondary) are
-- not.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      -- move the third node into a different group
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni
1271 |
|
1272 |
-- | Helper function to check if we can allocate an instance on a
-- given node list.
canAllocOn :: Node.List -> Int -> Instance.Instance -> Bool
canAllocOn nl reqnodes inst =
  case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
       Cluster.tryAlloc nl (Container.empty) inst of
    Types.Bad _ -> False
    Types.Ok as ->
      case Cluster.asSolution as of
        Nothing -> False
        Just _ -> True

-- | Checks that allocation obeys minimum and maximum instance
-- policies. The unittest generates a random node, duplicates it count
-- times, and generates a random instance that can be allocated on
-- this mini-cluster; it then checks that after applying a policy that
-- the instance doesn't fit, the allocation fails.
prop_ClusterAllocPolicy node =
  -- rqn is the required nodes (1 or 2)
  forAll (choose (1, 2)) $ \rqn ->
  forAll (choose (5, 20)) $ \count ->
  forAll (arbitrary `suchThat` (canAllocOn (makeSmallCluster node count) rqn))
         $ \inst ->
  -- pick a policy the instance is guaranteed to violate
  forAll (arbitrary `suchThat` (isFailure .
                                Instance.instMatchesPolicy inst)) $ \ipol ->
  let node' = Node.setPolicy ipol node
      nl = makeSmallCluster node' count
  in not $ canAllocOn nl rqn inst
1300 |
|
1301 |
-- | List of all the "Cluster" tests.
testSuite "Cluster"
  [ 'prop_Score_Zero
  , 'prop_CStats_sane
  , 'prop_ClusterAlloc_sane
  , 'prop_ClusterCanTieredAlloc
  , 'prop_ClusterAllocRelocate
  , 'prop_ClusterAllocEvacuate
  , 'prop_ClusterAllocChangeGroup
  , 'prop_ClusterAllocBalance
  , 'prop_ClusterCheckConsistency
  , 'prop_ClusterSplitCluster
  , 'prop_ClusterAllocPolicy
  ]
1314 |
|
1315 |
-- ** OpCodes tests

-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok op' -> op ==? op'
  where _types = op::OpCodes.OpCode

-- | List of all the "OpCodes" tests.
testSuite "OpCodes"
  [ 'prop_OpCodes_serialization ]
1326 |
|
1327 |
-- ** Jobs tests

-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok os' -> os ==? os'
  where _types = os::Jobs.OpStatus

-- | Check that job status serialization is idempotent.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok js' -> js ==? js'
  where _types = js::Jobs.JobStatus

-- | List of all the "Jobs" tests.
testSuite "Jobs"
  [ 'prop_OpStatus_serialization
  , 'prop_JobStatus_serialization
  ]
1346 |
|
1347 |
-- ** Loader tests

-- | Node lookup agrees with a plain map lookup on the same
-- association list.
prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node ==? Data.Map.lookup node nl
    where nl = Data.Map.fromList ktn

-- | Instance lookup agrees with a plain map lookup on the same
-- association list.
prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst ==? Data.Map.lookup inst il
    where il = Data.Map.fromList kti
1356 |
|
1357 |
-- | Checks that assigning indices to uniquely-named nodes produces a
-- name map and a container of matching sizes, with indices covering
-- exactly 0..n-1.
prop_Loader_assignIndices =
  -- generate nodes with unique names
  forAll (arbitrary `suchThat`
          (\nodes ->
             let names = map Node.name nodes
             in length names == length (nub names))) $ \nodes ->
  let (nassoc, kt) =
        Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
  in Data.Map.size nassoc == length nodes &&
     Container.size kt == length nodes &&
     -- equivalent to the former 'if not (null nodes) then ... else
     -- True', but in the idiomatic implication form; 'maximum' is
     -- only reached when the list is non-empty
     (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
1370 |
|
1371 |
-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] []
          (Loader.emptyCluster {Loader.cdNodes = na}) of
       Types.Bad _ -> False
       Types.Ok (Loader.ClusterData _ nl il _ _) ->
         let nodes = Container.elems nl
             instances = Container.elems il
         in (sum . map (length . Node.pList)) nodes == 0 &&
            null instances

-- | Check that compareNameComponent on equal strings works.
prop_Loader_compareNameComponent_equal :: String -> Bool
prop_Loader_compareNameComponent_equal s =
  Loader.compareNameComponent s s ==
    Loader.LookupResult Loader.ExactMatch s

-- | Check that compareNameComponent on prefix strings works.
prop_Loader_compareNameComponent_prefix :: NonEmptyList Char -> String -> Bool
prop_Loader_compareNameComponent_prefix (NonEmpty s1) s2 =
  Loader.compareNameComponent (s1 ++ "." ++ s2) s1 ==
    Loader.LookupResult Loader.PartialMatch s1
1395 |
|
1396 |
-- | List of all the "Loader" tests.
testSuite "Loader"
  [ 'prop_Loader_lookupNode
  , 'prop_Loader_lookupInstance
  , 'prop_Loader_assignIndices
  , 'prop_Loader_mergeData
  , 'prop_Loader_compareNameComponent_equal
  , 'prop_Loader_compareNameComponent_prefix
  ]
1404 |
|
1405 |
-- ** Types tests

-- | AllocPolicy JSON serialisation round-trips.
prop_Types_AllocPolicy_serialisation apol =
  case J.readJSON (J.showJSON apol) of
    J.Ok p -> p ==? apol
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = apol::Types.AllocPolicy

-- | DiskTemplate JSON serialisation round-trips.
prop_Types_DiskTemplate_serialisation dt =
  case J.readJSON (J.showJSON dt) of
    J.Ok p -> p ==? dt
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = dt::Types.DiskTemplate

-- | ISpec JSON serialisation round-trips.
prop_Types_ISpec_serialisation ispec =
  case J.readJSON (J.showJSON ispec) of
    J.Ok p -> p ==? ispec
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = ispec::Types.ISpec

-- | IPolicy JSON serialisation round-trips.
prop_Types_IPolicy_serialisation ipol =
  case J.readJSON (J.showJSON ipol) of
    J.Ok p -> p ==? ipol
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = ipol::Types.IPolicy

-- | EvacMode JSON serialisation round-trips.
prop_Types_EvacMode_serialisation em =
  case J.readJSON (J.showJSON em) of
    J.Ok p -> p ==? em
    J.Error s -> failTest $ "Failed to deserialise: " ++ s
  where _types = em::Types.EvacMode

-- | opToResult maps OpFail to Bad and OpGood to Ok with the same
-- payload.
prop_Types_opToResult op =
  case op of
    Types.OpFail _ -> Types.isBad r
    Types.OpGood v -> case r of
                        Types.Bad _ -> False
                        Types.Ok v' -> v == v'
  where r = Types.opToResult op
        _types = op::Types.OpResult Int

-- | eitherToResult maps Left to Bad and Right to Ok with the same
-- payload.
prop_Types_eitherToResult ei =
  case ei of
    Left _ -> Types.isBad r
    Right v -> case r of
                 Types.Bad _ -> False
                 Types.Ok v' -> v == v'
  where r = Types.eitherToResult ei
        _types = ei::Either String Int
1454 |
|
1455 |
-- | List of all the "Types" tests.
testSuite "Types"
  [ 'prop_Types_AllocPolicy_serialisation
  , 'prop_Types_DiskTemplate_serialisation
  , 'prop_Types_ISpec_serialisation
  , 'prop_Types_IPolicy_serialisation
  , 'prop_Types_EvacMode_serialisation
  , 'prop_Types_opToResult
  , 'prop_Types_eitherToResult
  ]
1464 |
|
1465 |
-- ** CLI tests

-- | Test correct parsing. Note the field order: the string is
-- "disk,mem,cpu" while RSpec takes (cpu, mem, dsk).
prop_CLI_parseISpec descr dsk mem cpu =
  let str = printf "%d,%d,%d" dsk mem cpu
  in CLI.parseISpecString descr str ==? Types.Ok (Types.RSpec cpu mem dsk)

-- | Test parsing failure due to wrong section count (any count of
-- comma-separated values other than 3 must fail).
prop_CLI_parseISpecFail descr =
  forAll (choose (0,100) `suchThat` ((/=) 3)) $ \nelems ->
  forAll (replicateM nelems arbitrary) $ \values ->
  let str = intercalate "," $ map show (values::[Int])
  in case CLI.parseISpecString descr str of
       Types.Ok v -> failTest $ "Expected failure, got " ++ show v
       _ -> property True

-- | Test parseYesNo: Nothing yields the default, "yes"/"no" parse to
-- True/False, anything else is rejected.
prop_CLI_parseYesNo def testval val =
  forAll (elements [val, "yes", "no"]) $ \actual_val ->
  if testval
    then CLI.parseYesNo def Nothing ==? Types.Ok def
    else let result = CLI.parseYesNo def (Just actual_val)
         in if actual_val `elem` ["yes", "no"]
              then result ==? Types.Ok (actual_val == "yes")
              else property $ Types.isBad result

-- | List of all the "CLI" tests.
testSuite "CLI"
  [ 'prop_CLI_parseISpec
  , 'prop_CLI_parseISpecFail
  , 'prop_CLI_parseYesNo
  ]