{-# LANGUAGE TemplateHaskell #-}
{-# OPTIONS_GHC -fno-warn-orphans -fno-warn-unused-imports #-}

-- FIXME: should remove the no-warn-unused-imports option, once we get
-- around to testing functions from all modules; until then, we keep
-- the (unused) imports here to generate correct coverage (0 for
-- modules we don't use)

{-| Unittests for ganeti-htools.

-}

{-

Copyright (C) 2009, 2010, 2011, 2012 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.QC
  ( testUtils
  , testPeerMap
  , testContainer
  , testInstance
  , testNode
  , testText
  , testSimu
  , testOpCodes
  , testJobs
  , testCluster
  , testLoader
  , testTypes
  , testCLI
  , testJSON
  , testLUXI
  , testSsconf
  , testRpc
  , testQlang
  ) where

import Test.QuickCheck
import Test.QuickCheck.Monadic (assert, monadicIO, run, stop)
import Text.Printf (printf)
import Data.List (intercalate, nub, isPrefixOf)
import Data.Maybe
import Control.Monad
import Control.Applicative
import qualified System.Console.GetOpt as GetOpt
import qualified Text.JSON as J
import qualified Data.Map
import qualified Data.IntMap as IntMap
import Control.Concurrent (forkIO)
import Control.Exception (bracket, catchJust)
import System.Directory (getTemporaryDirectory, removeFile)
import System.IO (hClose, openTempFile)
import System.IO.Error (isEOFErrorType, ioeGetErrorType)

import qualified Ganeti.Confd as Confd
import qualified Ganeti.Config as Config
import qualified Ganeti.Daemon as Daemon
import qualified Ganeti.Hash as Hash
import qualified Ganeti.BasicTypes as BasicTypes
import qualified Ganeti.Jobs as Jobs
import qualified Ganeti.Logging as Logging
import qualified Ganeti.Luxi as Luxi
import qualified Ganeti.Objects as Objects
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Qlang as Qlang
import qualified Ganeti.Rpc as Rpc
import qualified Ganeti.Runtime as Runtime
import qualified Ganeti.Ssconf as Ssconf
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.JSON as JSON
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi as HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.PeerMap as PeerMap
import qualified Ganeti.HTools.Rapi
import qualified Ganeti.HTools.Simu as Simu
import qualified Ganeti.HTools.Text as Text
import qualified Ganeti.HTools.Types as Types
import qualified Ganeti.HTools.Utils as Utils
import qualified Ganeti.HTools.Version
import qualified Ganeti.Constants as C

import qualified Ganeti.HTools.Program as Program
import qualified Ganeti.HTools.Program.Hail
import qualified Ganeti.HTools.Program.Hbal
import qualified Ganeti.HTools.Program.Hscan
import qualified Ganeti.HTools.Program.Hspace

import Ganeti.HTools.QCHelper (testSuite)

-- * Constants

-- | Maximum memory (1TiB, somewhat random value).
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024

-- | Max vcpu ratio (random value).
maxVcpuRatio :: Double
maxVcpuRatio = 1024.0

-- | Max spindle ratio (random value).
maxSpindleRatio :: Double
maxSpindleRatio = 1024.0

-- | Max nodes, used just to limit arbitrary instances for smaller
-- opcode definitions (e.g. list of nodes in OpTestDelay).
maxNodes :: Int
maxNodes = 32

-- | Max opcodes or jobs in a submit job / submit many jobs request.
maxOpCodes :: Int
maxOpCodes = 16

-- | All disk templates (used later).
allDiskTemplates :: [Types.DiskTemplate]
allDiskTemplates = [minBound..maxBound]

-- | Null iPolicy, and by null we mean very liberal.
nullIPolicy :: Types.IPolicy
nullIPolicy = Types.IPolicy
  { Types.iPolicyMinSpec = Types.ISpec { Types.iSpecMemorySize = 0
                                       , Types.iSpecCpuCount = 0
                                       , Types.iSpecDiskSize = 0
                                       , Types.iSpecDiskCount = 0
                                       , Types.iSpecNicCount = 0
                                       , Types.iSpecSpindleUse = 0
                                       }
  , Types.iPolicyMaxSpec = Types.ISpec { Types.iSpecMemorySize = maxBound
                                       , Types.iSpecCpuCount = maxBound
                                       , Types.iSpecDiskSize = maxBound
                                       , Types.iSpecDiskCount = C.maxDisks
                                       , Types.iSpecNicCount = C.maxNics
                                       , Types.iSpecSpindleUse = maxBound
                                       }
  , Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
                                       , Types.iSpecCpuCount = Types.unitCpu
                                       , Types.iSpecDiskSize = Types.unitDsk
                                       , Types.iSpecDiskCount = 1
                                       , Types.iSpecNicCount = 1
                                       , Types.iSpecSpindleUse = 1
                                       }
  , Types.iPolicyDiskTemplates = [minBound..maxBound]
  , Types.iPolicyVcpuRatio = maxVcpuRatio -- somewhat random value, high
                                          -- enough to not impact us
  , Types.iPolicySpindleRatio = maxSpindleRatio
  }


defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
             Group.create "default" Types.defaultGroupID Types.AllocPreferred
                          nullIPolicy

defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- * Helper functions

-- | Simple checker for whether OpResult is fail or pass.
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Checks for equality with proper annotation.
(==?) :: (Show a, Eq a) => a -> a -> Property
(==?) x y = printTestCase
            ("Expected equality, but '" ++
             show x ++ "' /= '" ++ show y ++ "'") (x == y)
infix 3 ==?

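-- Illustrative note (not part of the test suite): a failing property
-- written with the operator above, e.g. @(1 + 1) ==? 3@, reports the
-- counterexample as "Expected equality, but '2' /= '3'", which is much
-- easier to debug than a plain 'False' from '=='.
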
-- | Show a message and fail the test.
failTest :: String -> Property
failTest msg = printTestCase msg False

-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode :: Node.Node
                           -> Instance.Instance -> Instance.Instance
setInstanceSmallerThanNode node inst =
  inst { Instance.mem = Node.availMem node `div` 2
       , Instance.dsk = Node.availDisk node `div` 2
       , Instance.vcpus = Node.availCpu node `div` 2
       }

-- | Create an instance given its spec.
createInstance :: Int -> Int -> Int -> Instance.Instance
createInstance mem dsk vcpus =
  Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
    Types.DTDrbd8 1

-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
  let origname = Node.name node
      origalias = Node.alias node
      nodes = map (\idx -> node { Node.name = origname ++ "-" ++ show idx
                                , Node.alias = origalias ++ "-" ++ show idx })
              [1..count]
      fn = flip Node.buildPeers Container.empty
      namelst = map (\n -> (Node.name n, fn n)) nodes
      (_, nlst) = Loader.assignIndices namelst
  in nlst

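-- Illustrative example (not part of the test suite): for a node named
-- "node", @makeSmallCluster node 3@ yields a 3-element 'Node.List' with
-- names/aliases "node-1".."node-3", peers rebuilt against an empty
-- instance container and indices assigned via 'Loader.assignIndices'.
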
-- | Make a small cluster, both nodes and instances.
makeSmallEmptyCluster :: Node.Node -> Int -> Instance.Instance
                      -> (Node.List, Instance.List, Instance.Instance)
makeSmallEmptyCluster node count inst =
  (makeSmallCluster node count, Container.empty,
   setInstanceSmallerThanNode node inst)

-- | Checks if a node is "big" enough.
isNodeBig :: Int -> Node.Node -> Bool
isNodeBig size node = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu

-- | Checks whether a balancing move can be found for the given table.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0

-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      maxiidx = if Container.null il
                  then 0
                  else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')

-- | Generates a list of a given size with non-duplicate elements.
genUniquesList :: (Eq a, Arbitrary a) => Int -> Gen [a]
genUniquesList cnt =
  foldM (\lst _ -> do
           newelem <- arbitrary `suchThat` (`notElem` lst)
           return (newelem:lst)) [] [1..cnt]

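-- Editorial note: 'genUniquesList' rejects duplicates via 'suchThat',
-- so it only terminates quickly when the requested count is small
-- relative to the element domain; asking for more unique values than
-- the domain can provide would loop forever.
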
-- | Checks if an instance is mirrored.
isMirrored :: Instance.Instance -> Bool
isMirrored = (/= Types.MirrorNone) . Instance.mirrorType

-- | Returns the possible change node types for a disk template.
evacModeOptions :: Types.MirrorType -> [Types.EvacMode]
evacModeOptions Types.MirrorNone = []
evacModeOptions Types.MirrorInternal = [minBound..maxBound] -- DRBD can do all
evacModeOptions Types.MirrorExternal = [Types.ChangePrimary, Types.ChangeAll]

-- * Arbitrary instances

-- | Defines a DNS name character.
newtype DNSChar = DNSChar { dnsGetChar::Char }

instance Arbitrary DNSChar where
  arbitrary = do
    x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
    return (DNSChar x)

instance Show DNSChar where
  show = show . dnsGetChar

-- | Generates a single name component.
getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n
  return (map dnsGetChar dn)

-- | Generates an entire FQDN.
getFQDN :: Gen String
getFQDN = do
  ncomps <- choose (1, 4)
  names <- vectorOf ncomps getName
  return $ intercalate "." names

-- | Combinator that generates a 'Maybe' using a sub-combinator.
getMaybe :: Gen a -> Gen (Maybe a)
getMaybe subgen = do
  bool <- arbitrary
  if bool
    then Just <$> subgen
    else return Nothing

-- | Generates a fields list. This uses the same character set as a
-- DNS name (just for simplicity).
getFields :: Gen [String]
getFields = do
  n <- choose (1, 32)
  vectorOf n getName

-- | Defines a tag type.
newtype TagChar = TagChar { tagGetChar :: Char }

-- | All valid tag chars. This doesn't need to match _exactly_
-- Ganeti's own tag regex, just enough for it to be close.
tagChar :: [Char]
tagChar = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] ++ ".+*/:@-"

instance Arbitrary TagChar where
  arbitrary = do
    c <- elements tagChar
    return (TagChar c)

-- | Generates a tag.
genTag :: Gen [TagChar]
genTag = do
  -- the correct value would be C.maxTagLen, but that's way too
  -- verbose in unittests, and at the moment I don't see any possible
  -- bugs with longer tags and the way we use tags in htools
  n <- choose (1, 10)
  vector n

-- | Generates a list of tags (correctly upper bounded).
genTags :: Gen [String]
genTags = do
  -- the correct value would be C.maxTagsPerObj, but per the comment
  -- in genTag, we don't use tags enough in htools to warrant testing
  -- such big values
  n <- choose (0, 10::Int)
  tags <- mapM (const genTag) [1..n]
  return $ map (map tagGetChar) tags

instance Arbitrary Types.InstanceStatus where
  arbitrary = elements [minBound..maxBound]

-- | Generates a random instance with disk/mem/cpu values bounded by
-- the given limits.
genInstanceSmallerThan :: Int -> Int -> Int -> Gen Instance.Instance
genInstanceSmallerThan lim_mem lim_dsk lim_cpu = do
  name <- getFQDN
  mem <- choose (0, lim_mem)
  dsk <- choose (0, lim_dsk)
  run_st <- arbitrary
  pn <- arbitrary
  sn <- arbitrary
  vcpus <- choose (0, lim_cpu)
  dt <- arbitrary
  return $ Instance.create name mem dsk vcpus run_st [] True pn sn dt 1

-- | Generates an instance smaller than a node.
genInstanceSmallerThanNode :: Node.Node -> Gen Instance.Instance
genInstanceSmallerThanNode node =
  genInstanceSmallerThan (Node.availMem node `div` 2)
                         (Node.availDisk node `div` 2)
                         (Node.availCpu node `div` 2)

-- let's generate a random instance
instance Arbitrary Instance.Instance where
  arbitrary = genInstanceSmallerThan maxMem maxDsk maxCpu

-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let (base_mem, base_dsk, base_cpu) =
        case min_multiplier of
          Just mm -> (mm * Types.unitMem,
                      mm * Types.unitDsk,
                      mm * Types.unitCpu)
          Nothing -> (0, 0, 0)
      (top_mem, top_dsk, top_cpu) =
        case max_multiplier of
          Just mm -> (mm * Types.unitMem,
                      mm * Types.unitDsk,
                      mm * Types.unitCpu)
          Nothing -> (maxMem, maxDsk, maxCpu)
  name <- getFQDN
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 1 0
      n' = Node.setPolicy nullIPolicy n
  return $ Node.buildPeers n' Container.empty

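-- Illustrative example (not part of the test suite):
-- @genNode (Just 4) Nothing@ produces nodes with at least four
-- allocation units ('Types.unitMem'/'unitDsk'/'unitCpu') of each
-- resource, capped by the 'maxMem'/'maxDsk'/'maxCpu' constants; the
-- 'Arbitrary' instance below simply uses @genNode Nothing Nothing@.
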
-- | Helper function to generate a sane node.
genOnlineNode :: Gen Node.Node
genOnlineNode = do
  arbitrary `suchThat` (\n -> not (Node.offline n) &&
                              not (Node.failN1 n) &&
                              Node.availDisk n > 0 &&
                              Node.availMem n > 0 &&
                              Node.availCpu n > 0)

-- and a random node
instance Arbitrary Node.Node where
  arbitrary = genNode Nothing Nothing

-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    case op_id of
      "OP_TEST_DELAY" ->
        OpCodes.OpTestDelay <$> arbitrary <*> arbitrary
                            <*> resize maxNodes (listOf getFQDN)
      "OP_INSTANCE_REPLACE_DISKS" ->
        OpCodes.OpInstanceReplaceDisks <$> getFQDN <*> getMaybe getFQDN <*>
          arbitrary <*> resize C.maxDisks arbitrary <*> getMaybe getName
      "OP_INSTANCE_FAILOVER" ->
        OpCodes.OpInstanceFailover <$> getFQDN <*> arbitrary <*>
          getMaybe getFQDN
      "OP_INSTANCE_MIGRATE" ->
        OpCodes.OpInstanceMigrate <$> getFQDN <*> arbitrary <*>
          arbitrary <*> arbitrary <*> getMaybe getFQDN
      _ -> fail "Wrong opcode"

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
  arbitrary = do
    v <- choose (0, 1)
    return $ SmallRatio v

instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.FailMode where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.EvacMode where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary a => Arbitrary (Types.OpResult a) where
  arbitrary = arbitrary >>= \c ->
    if c
      then Types.OpGood <$> arbitrary
      else Types.OpFail <$> arbitrary

instance Arbitrary Types.ISpec where
  arbitrary = do
    mem_s <- arbitrary::Gen (NonNegative Int)
    dsk_c <- arbitrary::Gen (NonNegative Int)
    dsk_s <- arbitrary::Gen (NonNegative Int)
    cpu_c <- arbitrary::Gen (NonNegative Int)
    nic_c <- arbitrary::Gen (NonNegative Int)
    su <- arbitrary::Gen (NonNegative Int)
    return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
                       , Types.iSpecCpuCount = fromIntegral cpu_c
                       , Types.iSpecDiskSize = fromIntegral dsk_s
                       , Types.iSpecDiskCount = fromIntegral dsk_c
                       , Types.iSpecNicCount = fromIntegral nic_c
                       , Types.iSpecSpindleUse = fromIntegral su
                       }

-- | Generates an ispec bigger than the given one.
genBiggerISpec :: Types.ISpec -> Gen Types.ISpec
genBiggerISpec imin = do
  mem_s <- choose (Types.iSpecMemorySize imin, maxBound)
  dsk_c <- choose (Types.iSpecDiskCount imin, maxBound)
  dsk_s <- choose (Types.iSpecDiskSize imin, maxBound)
  cpu_c <- choose (Types.iSpecCpuCount imin, maxBound)
  nic_c <- choose (Types.iSpecNicCount imin, maxBound)
  su <- choose (Types.iSpecSpindleUse imin, maxBound)
  return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
                     , Types.iSpecCpuCount = fromIntegral cpu_c
                     , Types.iSpecDiskSize = fromIntegral dsk_s
                     , Types.iSpecDiskCount = fromIntegral dsk_c
                     , Types.iSpecNicCount = fromIntegral nic_c
                     , Types.iSpecSpindleUse = fromIntegral su
                     }

instance Arbitrary Types.IPolicy where
  arbitrary = do
    imin <- arbitrary
    istd <- genBiggerISpec imin
    imax <- genBiggerISpec istd
    num_tmpl <- choose (0, length allDiskTemplates)
    dts <- genUniquesList num_tmpl
    vcpu_ratio <- choose (1.0, maxVcpuRatio)
    spindle_ratio <- choose (1.0, maxSpindleRatio)
    return Types.IPolicy { Types.iPolicyMinSpec = imin
                         , Types.iPolicyStdSpec = istd
                         , Types.iPolicyMaxSpec = imax
                         , Types.iPolicyDiskTemplates = dts
                         , Types.iPolicyVcpuRatio = vcpu_ratio
                         , Types.iPolicySpindleRatio = spindle_ratio
                         }

instance Arbitrary Objects.Hypervisor where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Objects.PartialNDParams where
  arbitrary = Objects.PartialNDParams <$> arbitrary <*> arbitrary

instance Arbitrary Objects.Node where
  arbitrary = Objects.Node <$> getFQDN <*> getFQDN <*> getFQDN
              <*> arbitrary <*> arbitrary <*> arbitrary <*> getFQDN
              <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary
              <*> arbitrary <*> arbitrary <*> getFQDN <*> arbitrary

instance Arbitrary Rpc.RpcCallAllInstancesInfo where
  arbitrary = Rpc.RpcCallAllInstancesInfo <$> arbitrary

instance Arbitrary Rpc.RpcCallInstanceList where
  arbitrary = Rpc.RpcCallInstanceList <$> arbitrary

instance Arbitrary Rpc.RpcCallNodeInfo where
  arbitrary = Rpc.RpcCallNodeInfo <$> arbitrary <*> arbitrary

-- | Custom 'Qlang.Filter' generator (top-level), which enforces a
-- (sane) limit on the depth of the generated filters.
genFilter :: Gen Qlang.Filter
genFilter = choose (0, 10) >>= genFilter'

-- | Custom generator for filters that correctly halves the state of
-- the generators at each recursive step, per the QuickCheck
-- documentation, in order not to run out of memory.
genFilter' :: Int -> Gen Qlang.Filter
genFilter' 0 =
  oneof [ return Qlang.EmptyFilter
        , Qlang.TrueFilter <$> getName
        , Qlang.EQFilter <$> getName <*> value
        , Qlang.LTFilter <$> getName <*> value
        , Qlang.GTFilter <$> getName <*> value
        , Qlang.LEFilter <$> getName <*> value
        , Qlang.GEFilter <$> getName <*> value
        , Qlang.RegexpFilter <$> getName <*> getName
        , Qlang.ContainsFilter <$> getName <*> value
        ]
  where value = oneof [ Qlang.QuotedString <$> getName
                      , Qlang.NumericValue <$> arbitrary
                      ]
genFilter' n = do
  oneof [ Qlang.AndFilter <$> vectorOf n'' (genFilter' n')
        , Qlang.OrFilter <$> vectorOf n'' (genFilter' n')
        , Qlang.NotFilter <$> genFilter' n'
        ]
  where n' = n `div` 2 -- sub-filter generator size
        n'' = max n' 2 -- but we don't want empty or 1-element lists,
                       -- so use this for and/or filter list length

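-- Illustrative note: with the starting size of 10 used by 'genFilter',
-- the sub-filter sizes shrink as 10 -> 5 -> 2 -> 1 -> 0, so nesting
-- depth stays logarithmic while And/Or lists always get at least two
-- elements (via n'').
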
instance Arbitrary Qlang.ItemType where
  arbitrary = elements [minBound..maxBound]

-- * Actual tests

-- ** Utils tests

-- | Helper to generate a small string that doesn't contain commas.
genNonCommaString :: Gen [Char]
genNonCommaString = do
  size <- choose (0, 20) -- arbitrary max size
  vectorOf size (arbitrary `suchThat` ((/=) ','))

-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit :: Property
prop_Utils_commaJoinSplit =
  forAll (choose (0, 20)) $ \llen ->
  forAll (vectorOf llen genNonCommaString `suchThat` ((/=) [""])) $ \lst ->
  Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst

-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin :: [Char] -> Property
prop_Utils_commaSplitJoin s =
  Utils.commaJoin (Utils.sepSplit ',' s) ==? s

-- | Test 'fromObjWithDefault', using the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault :: Integer -> String -> Bool
prop_Utils_fromObjWithDefault def_value random_key =
  -- a missing key will be returned with the default
  JSON.fromObjWithDefault [] random_key def_value == Just def_value &&
  -- a found key will be returned as is, not with default
  JSON.fromObjWithDefault [(random_key, J.showJSON def_value)]
       random_key (def_value+1) == Just def_value

-- | Test that the functional if' behaves like the syntactic sugar if.
prop_Utils_if'if :: Bool -> Int -> Int -> Gen Prop
prop_Utils_if'if cnd a b =
  Utils.if' cnd a b ==? if cnd then a else b

-- | Test basic select functionality.
prop_Utils_select :: Int      -- ^ Default result
                  -> [Int]    -- ^ List of False values
                  -> [Int]    -- ^ List of True values
                  -> Gen Prop -- ^ Test result
prop_Utils_select def lst1 lst2 =
  Utils.select def (flist ++ tlist) ==? expectedresult
  where expectedresult = Utils.if' (null lst2) def (head lst2)
        flist = zip (repeat False) lst1
        tlist = zip (repeat True) lst2

-- | Test basic select functionality with undefined default.
prop_Utils_select_undefd :: [Int]            -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop         -- ^ Test result
prop_Utils_select_undefd lst1 (NonEmpty lst2) =
  Utils.select undefined (flist ++ tlist) ==? head lst2
  where flist = zip (repeat False) lst1
        tlist = zip (repeat True) lst2

-- | Test basic select functionality with undefined list values.
prop_Utils_select_undefv :: [Int]            -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Gen Prop         -- ^ Test result
prop_Utils_select_undefv lst1 (NonEmpty lst2) =
  Utils.select undefined cndlist ==? head lst2
  where flist = zip (repeat False) lst1
        tlist = zip (repeat True) lst2
        cndlist = flist ++ tlist ++ [undefined]

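-- Worked example for the three 'select' properties above: with def = 0,
-- lst1 = [1] and lst2 = [2, 3], the candidate list is
-- [(False, 1), (True, 2), (True, 3)] and 'Utils.select' must return 2,
-- the first True-keyed value; only when lst2 is empty may the default
-- (undefined in the lazy variants) ever be evaluated.
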
-- | Test that 'Utils.parseUnit' handles both binary and SI suffixes,
-- and rejects unknown ones.
prop_Utils_parseUnit :: NonNegative Int -> Property
prop_Utils_parseUnit (NonNegative n) =
  Utils.parseUnit (show n) ==? Types.Ok n .&&.
  Utils.parseUnit (show n ++ "m") ==? Types.Ok n .&&.
  Utils.parseUnit (show n ++ "M") ==? Types.Ok (truncate n_mb::Int) .&&.
  Utils.parseUnit (show n ++ "g") ==? Types.Ok (n*1024) .&&.
  Utils.parseUnit (show n ++ "G") ==? Types.Ok (truncate n_gb::Int) .&&.
  Utils.parseUnit (show n ++ "t") ==? Types.Ok (n*1048576) .&&.
  Utils.parseUnit (show n ++ "T") ==? Types.Ok (truncate n_tb::Int) .&&.
  printTestCase "Internal error/overflow?"
    (n_mb >= 0 && n_gb >= 0 && n_tb >= 0) .&&.
  property (Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int))
  where n_mb = (fromIntegral n::Rational) * 1000 * 1000 / 1024 / 1024
        n_gb = n_mb * 1000
        n_tb = n_gb * 1000

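-- Worked example of the arithmetic encoded above, for n = 1: lowercase
-- suffixes are binary and already in MiB ("1m" -> 1, "1g" -> 1024,
-- "1t" -> 1048576), while uppercase suffixes are SI units converted to
-- MiB and truncated ("1M" -> truncate (10^6 / 2^20) = 0, "1G" -> 953,
-- "1T" -> 953674).
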
-- | Test list for the Utils module.
testSuite "Utils"
  [ 'prop_Utils_commaJoinSplit
  , 'prop_Utils_commaSplitJoin
  , 'prop_Utils_fromObjWithDefault
  , 'prop_Utils_if'if
  , 'prop_Utils_select
  , 'prop_Utils_select_undefd
  , 'prop_Utils_select_undefv
  , 'prop_Utils_parseUnit
  ]

-- ** PeerMap tests

-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent :: PeerMap.PeerMap
                           -> PeerMap.Key -> PeerMap.Elem -> Property
prop_PeerMap_addIdempotent pmap key em =
  fn puniq ==? fn (fn puniq)
  where fn = PeerMap.add key em
        puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent :: PeerMap.PeerMap -> PeerMap.Key -> Property
prop_PeerMap_removeIdempotent pmap key =
  fn puniq ==? fn (fn puniq)
  where fn = PeerMap.remove key
        puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing :: PeerMap.PeerMap -> PeerMap.Key -> Property
prop_PeerMap_findMissing pmap key =
  PeerMap.find key (PeerMap.remove key puniq) ==? 0
  where puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind :: PeerMap.PeerMap
                     -> PeerMap.Key -> PeerMap.Elem -> Property
prop_PeerMap_addFind pmap key em =
  PeerMap.find key (PeerMap.add key em puniq) ==? em
  where puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem indeed returns the maximum, or 0 for an
-- empty map.
prop_PeerMap_maxElem :: PeerMap.PeerMap -> Property
prop_PeerMap_maxElem pmap =
  PeerMap.maxElem puniq ==? if null puniq then 0
                              else (maximum . snd . unzip) puniq
  where puniq = PeerMap.accumArray const pmap

-- | List of tests for the PeerMap module.
testSuite "PeerMap"
  [ 'prop_PeerMap_addIdempotent
  , 'prop_PeerMap_removeIdempotent
  , 'prop_PeerMap_maxElem
  , 'prop_PeerMap_addFind
  , 'prop_PeerMap_findMissing
  ]

-- ** Container tests

-- we silence the following due to an hlint bug fixed in later versions
{-# ANN prop_Container_addTwo "HLint: ignore Avoid lambda" #-}
prop_Container_addTwo :: [Container.Key] -> Int -> Int -> Bool
prop_Container_addTwo cdata i1 i2 =
  fn i1 i2 cont == fn i2 i1 cont &&
  fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
  where cont = foldl (\c x -> Container.add x x c) Container.empty cdata
        fn x1 x2 = Container.addTwo x1 x1 x2 x2

prop_Container_nameOf :: Node.Node -> Property
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) ==? Node.name fnode

-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName :: Property
prop_Container_findByName =
  forAll (genNode (Just 1) Nothing) $ \node ->
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (genUniquesList (cnt * 2)) $ \ allnames ->
  forAll (arbitrary `suchThat` (`notElem` allnames)) $ \ othername ->
  let names = zip (take cnt allnames) (drop cnt allnames)
      nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) ==? Just target .&&.
     Container.findByName nl' (Node.alias target) ==? Just target .&&.
     printTestCase "Found non-existing name"
       (isNothing (Container.findByName nl' othername))

testSuite "Container"
  [ 'prop_Container_addTwo
  , 'prop_Container_nameOf
  , 'prop_Container_findByName
  ]

-- ** Instance tests

-- Simple instance tests, we only have setters/getters.

prop_Instance_creat :: Instance.Instance -> Property
prop_Instance_creat inst =
  Instance.name inst ==? Instance.alias inst

prop_Instance_setIdx :: Instance.Instance -> Types.Idx -> Property
prop_Instance_setIdx inst idx =
  Instance.idx (Instance.setIdx inst idx) ==? idx

prop_Instance_setName :: Instance.Instance -> String -> Bool
prop_Instance_setName inst name =
  Instance.name newinst == name &&
  Instance.alias newinst == name
  where newinst = Instance.setName inst name

prop_Instance_setAlias :: Instance.Instance -> String -> Bool
prop_Instance_setAlias inst name =
  Instance.name newinst == Instance.name inst &&
  Instance.alias newinst == name
  where newinst = Instance.setAlias inst name

prop_Instance_setPri :: Instance.Instance -> Types.Ndx -> Property
prop_Instance_setPri inst pdx =
  Instance.pNode (Instance.setPri inst pdx) ==? pdx

prop_Instance_setSec :: Instance.Instance -> Types.Ndx -> Property
prop_Instance_setSec inst sdx =
  Instance.sNode (Instance.setSec inst sdx) ==? sdx

prop_Instance_setBoth :: Instance.Instance -> Types.Ndx -> Types.Ndx -> Bool
prop_Instance_setBoth inst pdx sdx =
  Instance.pNode si == pdx && Instance.sNode si == sdx
  where si = Instance.setBoth inst pdx sdx

prop_Instance_shrinkMG :: Instance.Instance -> Property
prop_Instance_shrinkMG inst =
  Instance.mem inst >= 2 * Types.unitMem ==>
    case Instance.shrinkByType inst Types.FailMem of
      Types.Ok inst' -> Instance.mem inst' == Instance.mem inst - Types.unitMem
      _ -> False

prop_Instance_shrinkMF :: Instance.Instance -> Property
prop_Instance_shrinkMF inst =
  forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

prop_Instance_shrinkCG :: Instance.Instance -> Property
prop_Instance_shrinkCG inst =
  Instance.vcpus inst >= 2 * Types.unitCpu ==>
    case Instance.shrinkByType inst Types.FailCPU of
      Types.Ok inst' ->
        Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
      _ -> False

prop_Instance_shrinkCF :: Instance.Instance -> Property
prop_Instance_shrinkCF inst =
  forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

prop_Instance_shrinkDG :: Instance.Instance -> Property
prop_Instance_shrinkDG inst =
  Instance.dsk inst >= 2 * Types.unitDsk ==>
    case Instance.shrinkByType inst Types.FailDisk of
      Types.Ok inst' ->
        Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
      _ -> False

prop_Instance_shrinkDF :: Instance.Instance -> Property
prop_Instance_shrinkDF inst =
  forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk

prop_Instance_setMovable :: Instance.Instance -> Bool -> Property
prop_Instance_setMovable inst m =
  Instance.movable inst' ==? m
  where inst' = Instance.setMovable inst m

testSuite "Instance"
  [ 'prop_Instance_creat
  , 'prop_Instance_setIdx
  , 'prop_Instance_setName
  , 'prop_Instance_setAlias
  , 'prop_Instance_setPri
  , 'prop_Instance_setSec
  , 'prop_Instance_setBoth
  , 'prop_Instance_shrinkMG
  , 'prop_Instance_shrinkMF
  , 'prop_Instance_shrinkCG
  , 'prop_Instance_shrinkCF
  , 'prop_Instance_shrinkDG
  , 'prop_Instance_shrinkDF
  , 'prop_Instance_setMovable
  ]

-- ** Backends

-- *** Text backend tests

-- Instance text loader tests

prop_Text_Load_Instance :: String -> Int -> Int -> Int -> Types.InstanceStatus
                        -> NonEmptyList Char -> [Char]
                        -> NonNegative Int -> NonNegative Int -> Bool
                        -> Types.DiskTemplate -> Int -> Property
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal dt su =
  pnode /= snode && pdx /= sdx ==>
  let vcpus_s = show vcpus
      dsk_s = show dsk
      mem_s = show mem
      su_s = show su
      status_s = Types.instanceStatusToRaw status
      ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
      nl = Data.Map.fromList ndx
      tags = ""
      sbal = if autobal then "Y" else "N"
      sdt = Types.diskTemplateToRaw dt
      inst = Text.loadInst nl
             [name, mem_s, dsk_s, vcpus_s, status_s,
              sbal, pnode, snode, sdt, tags, su_s]
      fail1 = Text.loadInst nl
              [name, mem_s, dsk_s, vcpus_s, status_s,
               sbal, pnode, pnode, tags]
  in case inst of
       Types.Bad msg -> failTest $ "Failed to load instance: " ++ msg
       Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
                                        \ loading the instance" $
                          Instance.name i == name &&
                          Instance.vcpus i == vcpus &&
                          Instance.mem i == mem &&
                          Instance.pNode i == pdx &&
                          Instance.sNode i == (if null snode
                                                 then Node.noSecondary
                                                 else sdx) &&
                          Instance.autoBalance i == autobal &&
                          Instance.spindleUse i == su &&
                          Types.isBad fail1

prop_Text_Load_InstanceFail :: [(String, Int)] -> [String] -> Property
prop_Text_Load_InstanceFail ktn fields =
  length fields /= 10 && length fields /= 11 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> failTest "Managed to load instance from invalid data"
      Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                       "Invalid/incomplete instance data: '" `isPrefixOf` msg
  where nl = Data.Map.fromList ktn

prop_Text_Load_Node :: String -> Int -> Int -> Int -> Int -> Int
                    -> Int -> Bool -> Bool
prop_Text_Load_Node name tm nm fm td fd tc fo =
  let conv v = if v < 0
                 then "?"
                 else show v
      tm_s = conv tm
      nm_s = conv nm
      fm_s = conv fm
      td_s = conv td
      fd_s = conv fd
      tc_s = conv tc
      fo_s = if fo
               then "Y"
               else "N"
      any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
      gid = Group.uuid defGroup
  in case Text.loadNode defGroupAssoc
          [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
       Nothing -> False
       Just (name', node) ->
         if fo || any_broken
           then Node.offline node
           else Node.name node == name' && name' == name &&
                Node.alias node == name &&
                Node.tMem node == fromIntegral tm &&
                Node.nMem node == nm &&
                Node.fMem node == fm &&
                Node.tDsk node == fromIntegral td &&
                Node.fDsk node == fd &&
                Node.tCpu node == fromIntegral tc

prop_Text_Load_NodeFail :: [String] -> Property
prop_Text_Load_NodeFail fields =
  length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

prop_Text_NodeLSIdempotent :: Property
prop_Text_NodeLSIdempotent =
  forAll (genNode (Just 1) Nothing) $ \node ->
  -- override failN1 to what loadNode returns by default
  let n = Node.setPolicy Types.defIPolicy $
          node { Node.failN1 = True, Node.offline = False }
  in
    (Text.loadNode defGroupAssoc .
     Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==?
    Just (Node.name n, n)

prop_Text_ISpecIdempotent :: Types.ISpec -> Property
prop_Text_ISpecIdempotent ispec =
  case Text.loadISpec "dummy" . Utils.sepSplit ',' .
       Text.serializeISpec $ ispec of
    Types.Bad msg -> failTest $ "Failed to load ispec: " ++ msg
    Types.Ok ispec' -> ispec ==? ispec'

prop_Text_IPolicyIdempotent :: Types.IPolicy -> Property
prop_Text_IPolicyIdempotent ipol =
  case Text.loadIPolicy . Utils.sepSplit '|' $
       Text.serializeIPolicy owner ipol of
    Types.Bad msg -> failTest $ "Failed to load ipolicy: " ++ msg
    Types.Ok res -> (owner, ipol) ==? res
  where owner = "dummy"

-- | This property, while being in the text tests, does more than just
-- test end-to-end the serialisation and loading back workflow; it
-- also tests the Loader.mergeData and the actual
-- Cluster.iterateAlloc (checking that it is well-behaved w.r.t.
-- instance allocations, not its business logic). As such, it's a
-- quite complex and slow test, and that's the reason we restrict it
-- to small cluster sizes.
prop_Text_CreateSerialise :: Property
prop_Text_CreateSerialise =
  forAll genTags $ \ctags ->
  forAll (choose (1, 20)) $ \maxiter ->
  forAll (choose (2, 10)) $ \count ->
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  let nl = makeSmallCluster node count
      reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
  in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
     Cluster.iterateAlloc nl Container.empty (Just maxiter) inst allocn [] []
     of
       Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
       Types.Ok (_, _, _, [], _) -> printTestCase
                                    "Failed to allocate: no allocations" False
       Types.Ok (_, nl', il', _, _) ->
         let cdata = Loader.ClusterData defGroupList nl' il' ctags
                     Types.defIPolicy
             saved = Text.serializeCluster cdata
         in case Text.parseData saved >>= Loader.mergeData [] [] [] [] of
              Types.Bad msg -> failTest $ "Failed to load/merge: " ++ msg
              Types.Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
                ctags ==? ctags2 .&&.
                Types.defIPolicy ==? cpol2 .&&.
                il' ==? il2 .&&.
                defGroupList ==? gl2 .&&.
                nl' ==? nl2

testSuite "Text"
  [ 'prop_Text_Load_Instance
  , 'prop_Text_Load_InstanceFail
  , 'prop_Text_Load_Node
  , 'prop_Text_Load_NodeFail
  , 'prop_Text_NodeLSIdempotent
  , 'prop_Text_ISpecIdempotent
  , 'prop_Text_IPolicyIdempotent
  , 'prop_Text_CreateSerialise
  ]

-- *** Simu backend

-- | Generates a tuple of specs for simulation.
genSimuSpec :: Gen (String, Int, Int, Int, Int)
genSimuSpec = do
  pol <- elements [C.allocPolicyPreferred,
                   C.allocPolicyLastResort, C.allocPolicyUnallocable,
                   "p", "a", "u"]
  -- should be reasonable (nodes/group), bigger values only complicate
  -- the display of failed tests, and we don't care (in this particular
  -- test) about big node groups
  nodes <- choose (0, 20)
  dsk <- choose (0, maxDsk)
  mem <- choose (0, maxMem)
  cpu <- choose (0, maxCpu)
  return (pol, nodes, dsk, mem, cpu)

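-- Illustrative example (not part of the test suite): a generated tuple
-- such as ("p", 4, 102400, 65536, 64) is rendered by 'prop_SimuLoad'
-- below into the spec string "p,4,102400,65536,64" that
-- 'Simu.parseData' expects, one such string per node group.
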
-- | Checks that given a set of correct specs, we can load them
-- successfully, and that at a high level the values look right.
prop_SimuLoad :: Property
prop_SimuLoad =
  forAll (choose (0, 10)) $ \ngroups ->
  forAll (replicateM ngroups genSimuSpec) $ \specs ->
  let strspecs = map (\(p, n, d, m, c) -> printf "%s,%d,%d,%d,%d"
                                          p n d m c::String) specs
      totnodes = sum $ map (\(_, n, _, _, _) -> n) specs
      mdc_in = concatMap (\(_, n, d, m, c) ->
                            replicate n (fromIntegral m, fromIntegral d,
                                         fromIntegral c,
                                         fromIntegral m, fromIntegral d))
               specs :: [(Double, Double, Double, Int, Int)]
  in case Simu.parseData strspecs of
       Types.Bad msg -> failTest $ "Failed to load specs: " ++ msg
       Types.Ok (Loader.ClusterData gl nl il tags ipol) ->
         let nodes = map snd $ IntMap.toAscList nl
             nidx = map Node.idx nodes
             mdc_out = map (\n -> (Node.tMem n, Node.tDsk n, Node.tCpu n,
                                   Node.fMem n, Node.fDsk n)) nodes
         in
           Container.size gl ==? ngroups .&&.
           Container.size nl ==? totnodes .&&.
           Container.size il ==? 0 .&&.
           length tags ==? 0 .&&.
           ipol ==? Types.defIPolicy .&&.
           nidx ==? [1..totnodes] .&&.
           mdc_in ==? mdc_out .&&.
           map Group.iPolicy (Container.elems gl) ==?
             replicate ngroups Types.defIPolicy

testSuite "Simu"
  [ 'prop_SimuLoad
  ]

-- ** Node tests

prop_Node_setAlias :: Node.Node -> String -> Bool
prop_Node_setAlias node name =
  Node.name newnode == Node.name node &&
  Node.alias newnode == name
  where newnode = Node.setAlias node name

prop_Node_setOffline :: Node.Node -> Bool -> Property
prop_Node_setOffline node status =
  Node.offline newnode ==? status
  where newnode = Node.setOffline node status

prop_Node_setXmem :: Node.Node -> Int -> Property
prop_Node_setXmem node xm =
  Node.xMem newnode ==? xm
  where newnode = Node.setXmem node xm

prop_Node_setMcpu :: Node.Node -> Double -> Property
prop_Node_setMcpu node mc =
  Types.iPolicyVcpuRatio (Node.iPolicy newnode) ==? mc
  where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM :: Node.Node -> Instance.Instance -> Property
prop_Node_addPriFM node inst =
  Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
  not (Instance.isOffline inst) ==>
  case Node.addPri node inst'' of
    Types.OpFail Types.FailMem -> True
    _ -> False
  where inst' = setInstanceSmallerThanNode node inst
        inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Check that adding a primary instance with too much disk fails
-- with type FailDisk.
prop_Node_addPriFD :: Node.Node -> Instance.Instance -> Property
prop_Node_addPriFD node inst =
  forAll (elements Instance.localStorageTemplates) $ \dt ->
  Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
  let inst' = setInstanceSmallerThanNode node inst
      inst'' = inst' { Instance.dsk = Instance.dsk inst
                     , Instance.diskTemplate = dt }
  in case Node.addPri node inst'' of
       Types.OpFail Types.FailDisk -> True
       _ -> False

-- | Check that adding a primary instance with too many VCPUs fails
-- with type FailCPU.
prop_Node_addPriFC :: Property
prop_Node_addPriFC =
  forAll (choose (1, maxCpu)) $ \extra ->
  forAll genOnlineNode $ \node ->
  forAll (arbitrary `suchThat` Instance.notOffline) $ \inst ->
  let inst' = setInstanceSmallerThanNode node inst
      inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
  in case Node.addPri node inst'' of
       Types.OpFail Types.FailCPU -> property True
       v -> failTest $ "Expected OpFail FailCPU, but got " ++ show v

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addSec :: Node.Node -> Instance.Instance -> Int -> Property
prop_Node_addSec node inst pdx =
  ((Instance.mem inst >= (Node.fMem node - Node.rMem node) &&
    not (Instance.isOffline inst)) ||
   Instance.dsk inst >= Node.fDsk node) &&
  not (Node.failN1 node) ==>
    isFailure (Node.addSec node inst pdx)

-- | Check that an offline instance with reasonable disk size but
-- extra mem/cpu can always be added.
prop_Node_addOfflinePri :: NonNegative Int -> NonNegative Int -> Property
prop_Node_addOfflinePri (NonNegative extra_mem) (NonNegative extra_cpu) =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  let inst' = inst { Instance.runSt = Types.AdminOffline
                   , Instance.mem = Node.availMem node + extra_mem
                   , Instance.vcpus = Node.availCpu node + extra_cpu }
  in case Node.addPri node inst' of
       Types.OpGood _ -> property True
       v -> failTest $ "Expected OpGood, but got: " ++ show v

-- | Check that an offline instance with reasonable disk size but
-- extra mem/cpu can always be added.
prop_Node_addOfflineSec :: NonNegative Int -> NonNegative Int
                        -> Types.Ndx -> Property
prop_Node_addOfflineSec (NonNegative extra_mem) (NonNegative extra_cpu) pdx =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  let inst' = inst { Instance.runSt = Types.AdminOffline
                   , Instance.mem = Node.availMem node + extra_mem
                   , Instance.vcpus = Node.availCpu node + extra_cpu
                   , Instance.diskTemplate = Types.DTDrbd8 }
  in case Node.addSec node inst' pdx of
       Types.OpGood _ -> property True
       v -> failTest $ "Expected OpGood/OpGood, but got: " ++ show v

-- | Checks for memory reservation changes.
prop_Node_rMem :: Instance.Instance -> Property
prop_Node_rMem inst =
  not (Instance.isOffline inst) ==>
  forAll (genOnlineNode `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
  -- ab = auto_balance, nb = non-auto_balance
  -- we use -1 as the primary node of the instance
  let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True
                   , Instance.diskTemplate = Types.DTDrbd8 }
      inst_ab = setInstanceSmallerThanNode node inst'
      inst_nb = inst_ab { Instance.autoBalance = False }
      -- now we have the two instances, identical except the
      -- autoBalance attribute
      orig_rmem = Node.rMem node
      inst_idx = Instance.idx inst_ab
      node_add_ab = Node.addSec node inst_ab (-1)
      node_add_nb = Node.addSec node inst_nb (-1)
      node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
      node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
  in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
       (Types.OpGood a_ab, Types.OpGood a_nb,
        Types.OpGood d_ab, Types.OpGood d_nb) ->
         printTestCase "Consistency checks failed" $
           Node.rMem a_ab > orig_rmem &&
           Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
           Node.rMem a_nb == orig_rmem &&
           Node.rMem d_ab == orig_rmem &&
           Node.rMem d_nb == orig_rmem &&
           -- this is not related to rMem, but as good a place to
           -- test as any
           inst_idx `elem` Node.sList a_ab &&
           inst_idx `notElem` Node.sList d_ab
       x -> failTest $ "Failed to add/remove instances: " ++ show x

-- | Check mdsk setting.
prop_Node_setMdsk :: Node.Node -> SmallRatio -> Bool
prop_Node_setMdsk node mx =
  Node.loDsk node' >= 0 &&
  fromIntegral (Node.loDsk node') <= Node.tDsk node &&
  Node.availDisk node' >= 0 &&
  Node.availDisk node' <= Node.fDsk node' &&
  fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
  Node.mDsk node' == mx'
  where node' = Node.setMdsk node mx'
        SmallRatio mx' = mx

-- Check tag maps
prop_Node_tagMaps_idempotent :: Property
prop_Node_tagMaps_idempotent =
  forAll genTags $ \tags ->
  Node.delTags (Node.addTags m tags) tags ==? m
  where m = Data.Map.empty

prop_Node_tagMaps_reject :: Property
prop_Node_tagMaps_reject =
  forAll (genTags `suchThat` (not . null)) $ \tags ->
  let m = Node.addTags Data.Map.empty tags
  in all (\t -> Node.rejectAddTags m [t]) tags

prop_Node_showField :: Node.Node -> Property
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField

prop_Node_computeGroups :: [Node.Node] -> Bool
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))

-- Check idempotence of add/remove operations
prop_Node_addPri_idempotent :: Property
prop_Node_addPri_idempotent =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  case Node.addPri node inst of
    Types.OpGood node' -> Node.removePri node' inst ==? node
    _ -> failTest "Can't add instance"

prop_Node_addSec_idempotent :: Property
prop_Node_addSec_idempotent =
  forAll genOnlineNode $ \node ->
  forAll (genInstanceSmallerThanNode node) $ \inst ->
  let pdx = Node.idx node + 1
      inst' = Instance.setPri inst pdx
      inst'' = inst' { Instance.diskTemplate = Types.DTDrbd8 }
  in case Node.addSec node inst'' pdx of
       Types.OpGood node' -> Node.removeSec node' inst'' ==? node
       _ -> failTest "Can't add instance"

testSuite "Node"
  [ 'prop_Node_setAlias
  , 'prop_Node_setOffline
  , 'prop_Node_setMcpu
  , 'prop_Node_setXmem
  , 'prop_Node_addPriFM
  , 'prop_Node_addPriFD
  , 'prop_Node_addPriFC
  , 'prop_Node_addSec
  , 'prop_Node_addOfflinePri
  , 'prop_Node_addOfflineSec
  , 'prop_Node_rMem
  , 'prop_Node_setMdsk
  , 'prop_Node_tagMaps_idempotent
  , 'prop_Node_tagMaps_reject
  , 'prop_Node_showField
  , 'prop_Node_computeGroups
  , 'prop_Node_addPri_idempotent
  , 'prop_Node_addSec_idempotent
  ]

1315 |
|
1316 |
-- ** Cluster tests |
1317 |
|
1318 |
-- | Check that the cluster score is close to zero for a homogeneous |
1319 |
-- cluster. |
1320 |
prop_Score_Zero :: Node.Node -> Property |
1321 |
prop_Score_Zero node = |
1322 |
forAll (choose (1, 1024)) $ \count -> |
1323 |
(not (Node.offline node) && not (Node.failN1 node) && (count > 0) && |
1324 |
(Node.tDsk node > 0) && (Node.tMem node > 0)) ==> |
1325 |
let fn = Node.buildPeers node Container.empty |
1326 |
nlst = replicate count fn |
1327 |
score = Cluster.compCVNodes nlst |
1328 |
-- we can't say == 0 here as the floating point errors accumulate; |
1329 |
-- this should be much lower than the default score in CLI.hs |
1330 |
in score <= 1e-12 |
1331 |
|
1332 |
-- | Check that cluster stats are sane. |
1333 |
prop_CStats_sane :: Property |
1334 |
prop_CStats_sane = |
1335 |
forAll (choose (1, 1024)) $ \count -> |
1336 |
forAll genOnlineNode $ \node -> |
1337 |
let fn = Node.buildPeers node Container.empty |
1338 |
nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)] |
1339 |
nl = Container.fromList nlst |
1340 |
cstats = Cluster.totalResources nl |
1341 |
in Cluster.csAdsk cstats >= 0 && |
1342 |
Cluster.csAdsk cstats <= Cluster.csFdsk cstats |
1343 |
|
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane :: Instance.Instance -> Property
prop_ClusterAlloc_sane inst =
  forAll (choose (5, 20)) $ \count ->
  forAll genOnlineNode $ \node ->
  let (nl, il, inst') = makeSmallEmptyCluster node count inst
      reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
  in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
          Cluster.tryAlloc nl il inst' of
       Types.Bad _ -> False
       Types.Ok as ->
         case Cluster.asSolution as of
           Nothing -> False
           Just (xnl, xi, _, cv) ->
             let il' = Container.add (Instance.idx xi) xi il
                 tbl = Cluster.Table xnl il' cv []
             in not (canBalance tbl True True False)
|
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes. Furthermore, we test that
-- computed allocation statistics are correct.
prop_ClusterCanTieredAlloc :: Instance.Instance -> Property
prop_ClusterCanTieredAlloc inst =
  forAll (choose (2, 5)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  let nl = makeSmallCluster node count
      il = Container.empty
      rqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
      allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
  in case allocnodes >>= \allocnodes' ->
          Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
       Types.Bad msg -> failTest $ "Failed to tiered alloc: " ++ msg
       Types.Ok (_, nl', il', ixes, cstats) ->
         let (ai_alloc, ai_pool, ai_unav) =
               Cluster.computeAllocationDelta
                 (Cluster.totalResources nl)
                 (Cluster.totalResources nl')
             all_nodes = Container.elems nl
         in property (not (null ixes)) .&&.
            IntMap.size il' ==? length ixes .&&.
            length ixes ==? length cstats .&&.
            sum (map Types.allocInfoVCpus [ai_alloc, ai_pool, ai_unav]) ==?
              sum (map Node.hiCpu all_nodes) .&&.
            sum (map Types.allocInfoNCpus [ai_alloc, ai_pool, ai_unav]) ==?
              sum (map Node.tCpu all_nodes) .&&.
            sum (map Types.allocInfoMem [ai_alloc, ai_pool, ai_unav]) ==?
              truncate (sum (map Node.tMem all_nodes)) .&&.
            sum (map Types.allocInfoDisk [ai_alloc, ai_pool, ai_unav]) ==?
              truncate (sum (map Node.tDsk all_nodes))
|
-- | Helper function to create a cluster with the given number of nodes
-- and allocate an instance on it.
genClusterAlloc :: Int -> Node.Node -> Instance.Instance
                -> Types.Result (Node.List, Instance.List, Instance.Instance)
genClusterAlloc count node inst =
  let nl = makeSmallCluster node count
      reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
  in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
          Cluster.tryAlloc nl Container.empty inst of
       Types.Bad _ -> Types.Bad "Can't allocate"
       Types.Ok as ->
         case Cluster.asSolution as of
           Nothing -> Types.Bad "Empty solution?"
           Just (xnl, xi, _, _) ->
             let xil = Container.add (Instance.idx xi) xi Container.empty
             in Types.Ok (xnl, xil, xi)
|
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also relocate it.
prop_ClusterAllocRelocate :: Property
prop_ClusterAllocRelocate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      case IAlloc.processRelocate defGroupList nl il
             (Instance.idx inst) 1
             [(if Instance.diskTemplate inst' == Types.DTDrbd8
               then Instance.sNode
               else Instance.pNode) inst'] of
        Types.Ok _ -> property True
        Types.Bad msg -> failTest $ "Failed to relocate: " ++ msg
|
-- | Helper property checker for the result of a nodeEvac or
-- changeGroup operation.
check_EvacMode :: Group.Group -> Instance.Instance
               -> Types.Result (Node.List, Instance.List, Cluster.EvacSolution)
               -> Property
check_EvacMode grp inst result =
  case result of
    Types.Bad msg -> failTest $ "Couldn't evacuate/change group: " ++ msg
    Types.Ok (_, _, es) ->
      let moved = Cluster.esMoved es
          failed = Cluster.esFailed es
          opcodes = not . null $ Cluster.esOpCodes es
      in failmsg ("'failed' not empty: " ++ show failed) (null failed) .&&.
         failmsg "'opcodes' is null" opcodes .&&.
         case moved of
           [(idx', gdx, _)] ->
             failmsg "invalid instance moved" (idx == idx') .&&.
             failmsg "wrong target group" (gdx == Group.idx grp)
           v -> failmsg ("invalid solution: " ++ show v) False
  where failmsg :: String -> Bool -> Property
        failmsg msg = printTestCase ("Failed to evacuate: " ++ msg)
        idx = Instance.idx inst
|
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also node-evacuate it.
prop_ClusterAllocEvacuate :: Property
prop_ClusterAllocEvacuate =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      conjoin . map (\mode -> check_EvacMode defGroup inst' $
                              Cluster.tryNodeEvac defGroupList nl il mode
                                [Instance.idx inst']) .
        evacModeOptions . Instance.mirrorType $ inst'
|
-- | Checks that on a 4-8 node cluster with two node groups, once we
-- allocate an instance on the first node group, we can also change
-- its group.
prop_ClusterAllocChangeGroup :: Property
prop_ClusterAllocChangeGroup =
  forAll (choose (4, 8)) $ \count ->
  forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
  forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
  case genClusterAlloc count node inst of
    Types.Bad msg -> failTest msg
    Types.Ok (nl, il, inst') ->
      -- we need to add a second node group and nodes to the cluster
      let nl2 = Container.elems $ makeSmallCluster node count
          grp2 = Group.setIdx defGroup (Group.idx defGroup + 1)
          maxndx = maximum . map Node.idx $ nl2
          nl3 = map (\n -> n { Node.group = Group.idx grp2
                             , Node.idx = Node.idx n + maxndx }) nl2
          nl4 = Container.fromList . map (\n -> (Node.idx n, n)) $ nl3
          gl' = Container.add (Group.idx grp2) grp2 defGroupList
          nl' = IntMap.union nl nl4
      in check_EvacMode grp2 inst' $
         Cluster.tryChangeGroup gl' nl' il [] [Instance.idx inst']
|
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance :: Property
prop_ClusterAllocBalance =
  forAll (genNode (Just 5) (Just 128)) $ \node ->
  forAll (choose (3, 5)) $ \count ->
  not (Node.offline node) && not (Node.failN1 node) ==>
  let nl = makeSmallCluster node count
      (hnode, nl') = IntMap.deleteFindMax nl
      il = Container.empty
      allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
      i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
  in case allocnodes >>= \allocnodes' ->
          Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
       Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
       Types.Ok (_, _, _, [], _) -> failTest "Failed to allocate: no instances"
       Types.Ok (_, xnl, il', _, _) ->
         let ynl = Container.add (Node.idx hnode) hnode xnl
             cv = Cluster.compCV ynl
             tbl = Cluster.Table ynl il' cv []
         in printTestCase "Failed to rebalance" $
            canBalance tbl True True False
|
-- | Checks that the cluster consistency check (findSplitInstances)
-- flags only instances whose primary and secondary nodes are in
-- different node groups.
prop_ClusterCheckConsistency :: Node.Node -> Instance.Instance -> Bool
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])
|
-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster :: Node.Node -> Instance.Instance -> Property
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni
|
-- | Helper function to check if we can allocate an instance on a
-- given node list.
canAllocOn :: Node.List -> Int -> Instance.Instance -> Bool
canAllocOn nl reqnodes inst =
  case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
       Cluster.tryAlloc nl Container.empty inst of
    Types.Bad _ -> False
    Types.Ok as ->
      case Cluster.asSolution as of
        Nothing -> False
        Just _ -> True
|
-- | Checks that allocation obeys minimum and maximum instance
-- policies. The unittest generates a random node, duplicates it /count/
-- times, and generates a random instance that can be allocated on
-- this mini-cluster; it then checks that, after applying a policy that
-- the instance doesn't match, the allocation fails.
prop_ClusterAllocPolicy :: Node.Node -> Property
prop_ClusterAllocPolicy node =
  -- rqn is the number of required nodes (1 or 2)
  forAll (choose (1, 2)) $ \rqn ->
  forAll (choose (5, 20)) $ \count ->
  forAll (arbitrary `suchThat` (canAllocOn (makeSmallCluster node count) rqn))
         $ \inst ->
  forAll (arbitrary `suchThat` (isFailure .
                                Instance.instMatchesPolicy inst)) $ \ipol ->
  let node' = Node.setPolicy ipol node
      nl = makeSmallCluster node' count
  in not $ canAllocOn nl rqn inst
|
testSuite "Cluster"
            [ 'prop_Score_Zero
            , 'prop_CStats_sane
            , 'prop_ClusterAlloc_sane
            , 'prop_ClusterCanTieredAlloc
            , 'prop_ClusterAllocRelocate
            , 'prop_ClusterAllocEvacuate
            , 'prop_ClusterAllocChangeGroup
            , 'prop_ClusterAllocBalance
            , 'prop_ClusterCheckConsistency
            , 'prop_ClusterSplitCluster
            , 'prop_ClusterAllocPolicy
            ]
|
-- ** OpCodes tests

-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization :: OpCodes.OpCode -> Property
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok op' -> op ==? op'

testSuite "OpCodes"
            [ 'prop_OpCodes_serialization ]
|
-- ** Jobs tests

-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization :: Jobs.OpStatus -> Property
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok os' -> os ==? os'

prop_JobStatus_serialization :: Jobs.JobStatus -> Property
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok js' -> js ==? js'

testSuite "Jobs"
            [ 'prop_OpStatus_serialization
            , 'prop_JobStatus_serialization
            ]
|
-- ** Loader tests

prop_Loader_lookupNode :: [(String, Int)] -> String -> String -> Property
prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node ==? Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

prop_Loader_lookupInstance :: [(String, Int)] -> String -> Property
prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst ==? Data.Map.lookup inst il
  where il = Data.Map.fromList kti

prop_Loader_assignIndices :: Property
prop_Loader_assignIndices =
  -- generate nodes with unique names
  forAll (arbitrary `suchThat`
          (\nodes ->
             let names = map Node.name nodes
             in length names == length (nub names))) $ \nodes ->
  let (nassoc, kt) =
        Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
  in Data.Map.size nassoc == length nodes &&
     Container.size kt == length nodes &&
     if not (null nodes)
       then maximum (IntMap.keys kt) == length nodes - 1
       else True

-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Loader_mergeData :: [Node.Node] -> Bool
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] []
          (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _ _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

-- | Check that compareNameComponent on equal strings works.
prop_Loader_compareNameComponent_equal :: String -> Bool
prop_Loader_compareNameComponent_equal s =
  BasicTypes.compareNameComponent s s ==
  BasicTypes.LookupResult BasicTypes.ExactMatch s

-- | Check that compareNameComponent on prefix strings works.
prop_Loader_compareNameComponent_prefix :: NonEmptyList Char -> String -> Bool
prop_Loader_compareNameComponent_prefix (NonEmpty s1) s2 =
  BasicTypes.compareNameComponent (s1 ++ "." ++ s2) s1 ==
  BasicTypes.LookupResult BasicTypes.PartialMatch s1

testSuite "Loader"
            [ 'prop_Loader_lookupNode
            , 'prop_Loader_lookupInstance
            , 'prop_Loader_assignIndices
            , 'prop_Loader_mergeData
            , 'prop_Loader_compareNameComponent_equal
            , 'prop_Loader_compareNameComponent_prefix
            ]
|
-- ** Types tests

prop_Types_AllocPolicy_serialisation :: Types.AllocPolicy -> Property
prop_Types_AllocPolicy_serialisation apol =
  case J.readJSON (J.showJSON apol) of
    J.Ok p -> p ==? apol
    J.Error s -> failTest $ "Failed to deserialise: " ++ s

prop_Types_DiskTemplate_serialisation :: Types.DiskTemplate -> Property
prop_Types_DiskTemplate_serialisation dt =
  case J.readJSON (J.showJSON dt) of
    J.Ok p -> p ==? dt
    J.Error s -> failTest $ "Failed to deserialise: " ++ s

prop_Types_ISpec_serialisation :: Types.ISpec -> Property
prop_Types_ISpec_serialisation ispec =
  case J.readJSON (J.showJSON ispec) of
    J.Ok p -> p ==? ispec
    J.Error s -> failTest $ "Failed to deserialise: " ++ s

prop_Types_IPolicy_serialisation :: Types.IPolicy -> Property
prop_Types_IPolicy_serialisation ipol =
  case J.readJSON (J.showJSON ipol) of
    J.Ok p -> p ==? ipol
    J.Error s -> failTest $ "Failed to deserialise: " ++ s

prop_Types_EvacMode_serialisation :: Types.EvacMode -> Property
prop_Types_EvacMode_serialisation em =
  case J.readJSON (J.showJSON em) of
    J.Ok p -> p ==? em
    J.Error s -> failTest $ "Failed to deserialise: " ++ s

prop_Types_opToResult :: Types.OpResult Int -> Bool
prop_Types_opToResult op =
  case op of
    Types.OpFail _ -> Types.isBad r
    Types.OpGood v -> case r of
                        Types.Bad _ -> False
                        Types.Ok v' -> v == v'
  where r = Types.opToResult op

prop_Types_eitherToResult :: Either String Int -> Bool
prop_Types_eitherToResult ei =
  case ei of
    Left _ -> Types.isBad r
    Right v -> case r of
                 Types.Bad _ -> False
                 Types.Ok v' -> v == v'
  where r = Types.eitherToResult ei

testSuite "Types"
            [ 'prop_Types_AllocPolicy_serialisation
            , 'prop_Types_DiskTemplate_serialisation
            , 'prop_Types_ISpec_serialisation
            , 'prop_Types_IPolicy_serialisation
            , 'prop_Types_EvacMode_serialisation
            , 'prop_Types_opToResult
            , 'prop_Types_eitherToResult
            ]
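-- | All the serialisation properties above follow the same
-- read-after-show pattern; a single generic helper could express it
-- once, roughly as in the sketch below. This is illustrative only and
-- not wired into any test suite (hence the leading underscore, which
-- also avoids unused-binding warnings); the explicit per-type
-- properties are kept for their more descriptive test names.
_propSerialisationRoundTrip :: (J.JSON a, Eq a, Show a) => a -> Property
_propSerialisationRoundTrip x =
  case J.readJSON (J.showJSON x) of
    J.Error e -> failTest $ "Cannot deserialise: " ++ e
    J.Ok x' -> x ==? x'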
|
-- ** CLI tests

-- | Test correct parsing of a well-formed instance spec string.
prop_CLI_parseISpec :: String -> Int -> Int -> Int -> Property
prop_CLI_parseISpec descr dsk mem cpu =
  let str = printf "%d,%d,%d" dsk mem cpu::String
  in CLI.parseISpecString descr str ==? Types.Ok (Types.RSpec cpu mem dsk)
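-- For example (values are illustrative, derived from the property
-- above rather than from a separate fixture):
--
-- > CLI.parseISpecString "spec" "10240,1024,2"
-- >   == Types.Ok (Types.RSpec 2 1024 10240)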
|
-- | Test parsing failure due to wrong section count.
prop_CLI_parseISpecFail :: String -> Property
prop_CLI_parseISpecFail descr =
  forAll (choose (0,100) `suchThat` ((/=) 3)) $ \nelems ->
  forAll (replicateM nelems arbitrary) $ \values ->
  let str = intercalate "," $ map show (values::[Int])
  in case CLI.parseISpecString descr str of
       Types.Ok v -> failTest $ "Expected failure, got " ++ show v
       _ -> property True
|
-- | Test parseYesNo.
prop_CLI_parseYesNo :: Bool -> Bool -> [Char] -> Property
prop_CLI_parseYesNo def testval val =
  forAll (elements [val, "yes", "no"]) $ \actual_val ->
  if testval
    then CLI.parseYesNo def Nothing ==? Types.Ok def
    else let result = CLI.parseYesNo def (Just actual_val)
         in if actual_val `elem` ["yes", "no"]
              then result ==? Types.Ok (actual_val == "yes")
              else property $ Types.isBad result
|
-- | Helper to check for correct parsing of string arg.
checkStringArg :: [Char]
               -> (GetOpt.OptDescr (CLI.Options -> Types.Result CLI.Options),
                   CLI.Options -> Maybe [Char])
               -> Property
checkStringArg val (opt, fn) =
  let GetOpt.Option _ longs _ _ = opt
  in case longs of
       [] -> failTest "no long options?"
       cmdarg:_ ->
         case CLI.parseOptsInner ["--" ++ cmdarg ++ "=" ++ val] "prog" [opt] of
           Left e -> failTest $ "Failed to parse option: " ++ show e
           Right (options, _) -> fn options ==? Just val

-- | Test a few string arguments.
prop_CLI_StringArg :: [Char] -> Property
prop_CLI_StringArg argument =
  let args = [ (CLI.oDataFile, CLI.optDataFile)
             , (CLI.oDynuFile, CLI.optDynuFile)
             , (CLI.oSaveCluster, CLI.optSaveCluster)
             , (CLI.oReplay, CLI.optReplay)
             , (CLI.oPrintCommands, CLI.optShowCmds)
             , (CLI.oLuxiSocket, CLI.optLuxi)
             ]
  in conjoin $ map (checkStringArg argument) args
|
-- | Helper to test that a given option is accepted and triggers an
-- early exit.
checkEarlyExit :: String -> [CLI.OptType] -> String -> Property
checkEarlyExit name options param =
  case CLI.parseOptsInner [param] name options of
    Left (code, _) ->
      if code == 0
        then property True
        else failTest $ "Program " ++ name ++
               " returns invalid code " ++ show code ++
               " for option " ++ param
    _ -> failTest $ "Program " ++ name ++ " doesn't consider option " ++
           param ++ " as an early-exit one"

-- | Test that all binaries support some common options. There is
-- nothing actually random about this test...
prop_CLI_stdopts :: Property
prop_CLI_stdopts =
  let params = ["-h", "--help", "-V", "--version"]
      opts = map (\(name, (_, o)) -> (name, o)) Program.personalities
      -- apply checkEarlyExit across the cartesian product of params and opts
  in conjoin [checkEarlyExit n o p | p <- params, (n, o) <- opts]
|
testSuite "CLI"
            [ 'prop_CLI_parseISpec
            , 'prop_CLI_parseISpecFail
            , 'prop_CLI_parseYesNo
            , 'prop_CLI_StringArg
            , 'prop_CLI_stdopts
            ]
|
-- * JSON tests

prop_JSON_toArray :: [Int] -> Property
prop_JSON_toArray intarr =
  let arr = map J.showJSON intarr in
  case JSON.toArray (J.JSArray arr) of
    Types.Ok arr' -> arr ==? arr'
    Types.Bad err -> failTest $ "Failed to parse array: " ++ err
|
prop_JSON_toArrayFail :: Int -> String -> Bool -> Property
prop_JSON_toArrayFail i s b =
  -- poor man's instance Arbitrary JSValue
  forAll (elements [J.showJSON i, J.showJSON s, J.showJSON b]) $ \item ->
  case JSON.toArray item of
    Types.Bad _ -> property True
    Types.Ok result -> failTest $ "Unexpected parse, got " ++ show result
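-- A fuller generator would look roughly like the sketch below; it is
-- kept as a comment on purpose, since it would add yet another orphan
-- instance and the three-element version above is sufficient here:
--
-- > instance Arbitrary J.JSValue where
-- >   arbitrary = oneof [ J.showJSON <$> (arbitrary :: Gen Int)
-- >                     , J.showJSON <$> (arbitrary :: Gen String)
-- >                     , J.showJSON <$> (arbitrary :: Gen Bool)
-- >                     ]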
|
testSuite "JSON"
            [ 'prop_JSON_toArray
            , 'prop_JSON_toArrayFail
            ]
|
-- * Luxi tests

instance Arbitrary Luxi.TagObject where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Luxi.LuxiReq where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Luxi.LuxiOp where
  arbitrary = do
    lreq <- arbitrary
    case lreq of
      Luxi.ReqQuery -> Luxi.Query <$> arbitrary <*> getFields <*> genFilter
      Luxi.ReqQueryNodes -> Luxi.QueryNodes <$> (listOf getFQDN) <*>
                            getFields <*> arbitrary
      Luxi.ReqQueryGroups -> Luxi.QueryGroups <$> arbitrary <*>
                             arbitrary <*> arbitrary
      Luxi.ReqQueryInstances -> Luxi.QueryInstances <$> (listOf getFQDN) <*>
                                getFields <*> arbitrary
      Luxi.ReqQueryJobs -> Luxi.QueryJobs <$> arbitrary <*> getFields
      Luxi.ReqQueryExports -> Luxi.QueryExports <$>
                              (listOf getFQDN) <*> arbitrary
      Luxi.ReqQueryConfigValues -> Luxi.QueryConfigValues <$> getFields
      Luxi.ReqQueryClusterInfo -> pure Luxi.QueryClusterInfo
      Luxi.ReqQueryTags -> Luxi.QueryTags <$> arbitrary <*> getFQDN
      Luxi.ReqSubmitJob -> Luxi.SubmitJob <$> (resize maxOpCodes arbitrary)
      Luxi.ReqSubmitManyJobs -> Luxi.SubmitManyJobs <$>
                                (resize maxOpCodes arbitrary)
      Luxi.ReqWaitForJobChange -> Luxi.WaitForJobChange <$> arbitrary <*>
                                  getFields <*> pure J.JSNull <*>
                                  pure J.JSNull <*> arbitrary
      Luxi.ReqArchiveJob -> Luxi.ArchiveJob <$> arbitrary
      Luxi.ReqAutoArchiveJobs -> Luxi.AutoArchiveJobs <$> arbitrary <*>
                                 arbitrary
      Luxi.ReqCancelJob -> Luxi.CancelJob <$> arbitrary
      Luxi.ReqSetDrainFlag -> Luxi.SetDrainFlag <$> arbitrary
      Luxi.ReqSetWatcherPause -> Luxi.SetWatcherPause <$> arbitrary
-- | Simple check that encoding/decoding of LuxiOp works. |
-- | Simple check that encoding/decoding of LuxiOp works.
prop_Luxi_CallEncoding :: Luxi.LuxiOp -> Property
prop_Luxi_CallEncoding op =
  (Luxi.validateCall (Luxi.buildCall op) >>= Luxi.decodeCall) ==? Types.Ok op
|
-- | Helper to get a temporary file name.
getTempFileName :: IO FilePath
getTempFileName = do
  tempdir <- getTemporaryDirectory
  (fpath, handle) <- openTempFile tempdir "luxitest"
  _ <- hClose handle
  removeFile fpath
  return fpath
|
-- | Helper to execute recvMsg but return Nothing if we reach EOF.
handleEOF :: (IO a) -> IO (Maybe a)
handleEOF action =
  catchJust
    (\e -> if isEOFErrorType (ioeGetErrorType e) then Just () else Nothing)
    (liftM Just action)
    (\_ -> return Nothing)
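-- Typical (illustrative) use, reading until the peer closes the
-- connection; 'client' and 'process' are placeholder names:
--
-- > msg <- handleEOF (Luxi.recvMsg client)
-- > case msg of
-- >   Nothing -> return ()  -- EOF, the peer has gone away
-- >   Just m  -> process m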
|
-- | Server ping-pong helper.
luxiServerPong :: Luxi.Client -> IO ()
luxiServerPong c = do
  msg <- handleEOF (Luxi.recvMsg c)
  case msg of
    Nothing -> return ()
    Just m -> Luxi.sendMsg c m >> luxiServerPong c

-- | Client ping-pong helper.
luxiClientPong :: Luxi.Client -> [String] -> IO [String]
luxiClientPong c =
  mapM (\m -> Luxi.sendMsg c m >> Luxi.recvMsg c)
|
-- | Monadic check that, given a server socket, we can connect via a
-- client to it, and that we can send a list of arbitrary messages and
-- get back what we sent.
prop_Luxi_ClientServer :: [[DNSChar]] -> Property
prop_Luxi_ClientServer dnschars = monadicIO $ do
  let msgs = map (map dnsGetChar) dnschars
  fpath <- run $ getTempFileName
  -- we need to create the server first, otherwise (if we do it in the
  -- forked thread) the client could try to connect to it before it's
  -- ready
  server <- run $ Luxi.getServer fpath
  -- fork the server responder
  _ <- run . forkIO $
    bracket
      (Luxi.acceptClient server)
      (\c -> Luxi.closeClient c >> removeFile fpath)
      luxiServerPong
  replies <- run $
    bracket
      (Luxi.getClient fpath)
      Luxi.closeClient
      (\c -> luxiClientPong c msgs)
  assert $ replies == msgs
|
testSuite "LUXI"
            [ 'prop_Luxi_CallEncoding
            , 'prop_Luxi_ClientServer
            ]
|
-- * Ssconf tests

instance Arbitrary Ssconf.SSKey where
  arbitrary = elements [minBound..maxBound]

prop_Ssconf_filename :: Ssconf.SSKey -> Property
prop_Ssconf_filename key =
  printTestCase "Key doesn't start with correct prefix" $
  Ssconf.sSFilePrefix `isPrefixOf` Ssconf.keyToFilename (Just "") key

testSuite "Ssconf"
            [ 'prop_Ssconf_filename
            ]
|
-- * Rpc tests

-- | Monadic check that, for an offline node and a call that does not
-- accept offline nodes, we get an OfflineNodeError response.
-- FIXME: we need a way of generalizing this; running it manually for
-- every call will soon get problematic.
prop_Rpc_noffl_request_allinstinfo :: Rpc.RpcCallAllInstancesInfo -> Property
prop_Rpc_noffl_request_allinstinfo call =
  forAll (arbitrary `suchThat` Objects.nodeOffline) $ \node -> monadicIO $ do
    res <- run $ Rpc.executeRpcCall [node] call
    stop $ res ==? [(node, Left (Rpc.OfflineNodeError node))]

prop_Rpc_noffl_request_instlist :: Rpc.RpcCallInstanceList -> Property
prop_Rpc_noffl_request_instlist call =
  forAll (arbitrary `suchThat` Objects.nodeOffline) $ \node -> monadicIO $ do
    res <- run $ Rpc.executeRpcCall [node] call
    stop $ res ==? [(node, Left (Rpc.OfflineNodeError node))]

prop_Rpc_noffl_request_nodeinfo :: Rpc.RpcCallNodeInfo -> Property
prop_Rpc_noffl_request_nodeinfo call =
  forAll (arbitrary `suchThat` Objects.nodeOffline) $ \node -> monadicIO $ do
    res <- run $ Rpc.executeRpcCall [node] call
    stop $ res ==? [(node, Left (Rpc.OfflineNodeError node))]
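-- As per the FIXME above, the three properties differ only in the call
-- type; a generalisation would look roughly like the sketch below (left
-- as a comment, since the exact class constraints tying the call type
-- to its result type in Ganeti.Rpc would need checking first):
--
-- > prop_Rpc_noffl_request call =
-- >   forAll (arbitrary `suchThat` Objects.nodeOffline) $ \node ->
-- >   monadicIO $ do
-- >     res <- run $ Rpc.executeRpcCall [node] call
-- >     stop $ res ==? [(node, Left (Rpc.OfflineNodeError node))]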
|
testSuite "Rpc"
            [ 'prop_Rpc_noffl_request_allinstinfo
            , 'prop_Rpc_noffl_request_instlist
            , 'prop_Rpc_noffl_request_nodeinfo
            ]
|
-- * Qlang tests

-- | Tests that serialisation/deserialisation of filters is
-- idempotent.
prop_Qlang_Serialisation :: Property
prop_Qlang_Serialisation =
  forAll genFilter $ \flt ->
  J.readJSON (J.showJSON flt) ==? J.Ok flt

testSuite "Qlang"
            [ 'prop_Qlang_Serialisation
            ]