Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / QC.hs @ 22fac87d

History | View | Annotate | Download (37.1 kB)

1
{-| Unittests for ganeti-htools.
2

    
3
-}
4

    
5
{-
6

    
7
Copyright (C) 2009, 2010, 2011 Google Inc.
8

    
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
13

    
14
This program is distributed in the hope that it will be useful, but
15
WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
General Public License for more details.
18

    
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22
02110-1301, USA.
23

    
24
-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    , testTypes
38
    ) where
39

    
40
import Test.QuickCheck
41
import Data.List (findIndex, intercalate, nub, isPrefixOf)
42
import Data.Maybe
43
import Control.Monad
44
import qualified Text.JSON as J
45
import qualified Data.Map
46
import qualified Data.IntMap as IntMap
47
import qualified Ganeti.OpCodes as OpCodes
48
import qualified Ganeti.Jobs as Jobs
49
import qualified Ganeti.Luxi
50
import qualified Ganeti.HTools.CLI as CLI
51
import qualified Ganeti.HTools.Cluster as Cluster
52
import qualified Ganeti.HTools.Container as Container
53
import qualified Ganeti.HTools.ExtLoader
54
import qualified Ganeti.HTools.IAlloc as IAlloc
55
import qualified Ganeti.HTools.Instance as Instance
56
import qualified Ganeti.HTools.Loader as Loader
57
import qualified Ganeti.HTools.Luxi
58
import qualified Ganeti.HTools.Node as Node
59
import qualified Ganeti.HTools.Group as Group
60
import qualified Ganeti.HTools.PeerMap as PeerMap
61
import qualified Ganeti.HTools.Rapi
62
import qualified Ganeti.HTools.Simu
63
import qualified Ganeti.HTools.Text as Text
64
import qualified Ganeti.HTools.Types as Types
65
import qualified Ganeti.HTools.Utils as Utils
66
import qualified Ganeti.HTools.Version
67
import qualified Ganeti.Constants as C
68

    
69
run :: Testable prop => prop -> Args -> IO Result
70
run = flip quickCheckWithResult
71

    
72
-- * Constants

-- | Maximum memory, in MiB (1TiB, somewhat random value).
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk, in MiB (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024

-- | The default node group used by the tests, assigned index 0.
defGroup :: Group.Group
defGroup = Group.setIdx grp 0
    where grp = Group.create "default" Utils.defaultGroupID
                             Types.AllocPreferred

-- | Singleton group list containing only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | UUID-to-index association for 'defGroup'.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
96

    
97
-- * Helper functions

-- | Whether an 'Types.OpResult' represents a failure.
isFailure :: Types.OpResult a -> Bool
isFailure op = case op of
                 Types.OpFail _ -> True
                 _              -> False
103

    
104
-- | Shrink an instance's resources to half of what the given node
-- has available, so that it is guaranteed to fit on that node.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem   = halfMem
         , Instance.dsk   = halfDsk
         , Instance.vcpus = halfCpu
         }
    where halfMem = Node.availMem node `div` 2
          halfDsk = Node.availDisk node `div` 2
          halfCpu = Node.availCpu node `div` 2
110

    
111
-- | Create an unnamed, running, auto-balanced DRBD8 instance from its
-- memory/disk/vcpus spec (no nodes assigned yet, hence the -1s).
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus
                    "running" [] True (-1) (-1) Types.DTDrbd8
115

    
116
-- | Create a small cluster by repeating a node spec 'count' times and
-- assigning fresh indices to the copies.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    snd (Loader.assignIndices named)
    where peered = Node.buildPeers node Container.empty
          named  = replicate count (Node.name peered, peered)
123

    
124
-- | Checks if a node is "big" enough.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]

-- | Whether a balancing step exists for the given table under the
-- given disk-move/instance-move/evacuation flags.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac =
    isJust (Cluster.tryBalance tbl dm im evac 0 0)
132

    
133
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      -- next free instance index: one past the current maximum, or 0
      -- when the instance list is still empty
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      -- stamp the instance with its new index and node assignment
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      -- record the instance on its primary and secondary nodes
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
151

    
152
-- * Arbitrary instances

-- | A single character that is valid inside a DNS name.
newtype DNSChar = DNSChar { dnsGetChar::Char }

-- | Draw uniformly from the DNS alphabet (lowercase letters, digits,
-- underscore and dash).
instance Arbitrary DNSChar where
    arbitrary = liftM DNSChar $ elements validChars
        where validChars = ['a'..'z'] ++ ['0'..'9'] ++ "_-"
161

    
162
-- | Generate a single DNS label: 1 to 64 characters from the DNS
-- alphabet.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len :: Gen [DNSChar]
  return $ map dnsGetChar chars

-- | Generate an FQDN: a leading label followed by 1 to 4 further
-- components, joined with dots.
getFQDN :: Gen String
getFQDN = do
  leading <- getName
  ncomps <- choose (1, 4)
  comps <- vector ncomps :: Gen [[DNSChar]]
  let comps' = map (map dnsGetChar) comps
  return . intercalate "." $ leading : comps'
176

    
177
-- | Generate a random instance: FQDN name, mem/disk/vcpus within the
-- test bounds, and a run status drawn from the valid constants plus
-- two invalid strings (to exercise the error paths); the node indices
-- are fully arbitrary.
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      -- all known statuses plus two that should never parse
      run_st <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn
                               Types.DTDrbd8
196

    
197
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  -- lower bounds: the multiplier times one allocation unit, or zero
  let (base_mem, base_dsk, base_cpu) =
          case min_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (0, 0, 0)
      -- upper bounds: multiplier times one unit, or the global caps
      (top_mem, top_dsk, top_cpu)  =
          case max_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (maxMem, maxDsk, maxCpu)
  name  <- getFQDN
  -- the choose ranges below keep free <= total and node memory within
  -- what is left over (mem_n <= mem_t - mem_f)
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers n Container.empty

-- | A fully random node, bounded only by the global constants.
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing
230

    
231
-- | Replace-disks mode: any of the four constructors, uniformly.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements modes
      where modes = [ OpCodes.ReplaceOnPrimary
                    , OpCodes.ReplaceOnSecondary
                    , OpCodes.ReplaceNewSecondary
                    , OpCodes.ReplaceAuto
                    ]
238

    
239
-- | Generate one of four opcode kinds, with arbitrary parameters.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
          arbitrary
        -- unreachable: op_id always comes from the list above
        _ -> fail "Wrong opcode")
258

    
259
-- | Any operation status, uniformly over the whole enumeration.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

-- | Any job status, uniformly over the whole enumeration.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- | A ratio in the closed interval [0, 1].
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Any allocation policy, uniformly over the whole enumeration.
instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]

-- | Any disk template, uniformly over the whole enumeration.
instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]
276

    
277
-- * Actual tests

-- ** Utils tests

-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat` simple) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst
    where simple l = l /= [""] && all (notElem ',') l

-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin s =
    Utils.commaJoin (Utils.sepSplit ',' s) == s
290

    
291
-- | fromObjWithDefault, we test using the Maybe monad and an integer
292
-- value.
293
prop_Utils_fromObjWithDefault def_value random_key =
294
    -- a missing key will be returned with the default
295
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
296
    -- a found key will be returned as is, not with default
297
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
298
         random_key (def_value+1) == Just def_value
299
        where _types = def_value :: Integer
300

    
301
-- | Test that functional if' behaves like the syntactic sugar if.
302
prop_Utils_if'if :: Bool -> Int -> Int -> Bool
303
prop_Utils_if'if cnd a b = Utils.if' cnd a b == if cnd then a else b
304

    
305
-- | Test basic select functionality
prop_Utils_select :: Int   -- ^ Default result
                  -> [Int] -- ^ List of False values
                  -> [Int] -- ^ List of True values
                  -> Bool  -- ^ Test result
prop_Utils_select def lst1 lst2 =
  Utils.select def cndlist == expected
  where expected = Utils.if' (null lst2) def (head lst2)
        cndlist  = [(False, e) | e <- lst1] ++ [(True, e) | e <- lst2]

-- | Test basic select functionality with undefined default
prop_Utils_select_undefd :: [Int] -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Bool  -- ^ Test result
prop_Utils_select_undefd lst1 (NonEmpty lst2) =
  -- the default must never be forced, since a True entry exists
  Utils.select undefined cndlist == head lst2
  where cndlist = [(False, e) | e <- lst1] ++ [(True, e) | e <- lst2]

-- | Test basic select functionality with undefined list values
prop_Utils_select_undefv :: [Int] -- ^ List of False values
                         -> NonEmptyList Int -- ^ List of True values
                         -> Bool  -- ^ Test result
prop_Utils_select_undefv lst1 (NonEmpty lst2) =
  -- entries after the first True match must never be forced
  Utils.select undefined cndlist == head lst2
  where cndlist = [(False, e) | e <- lst1]
                  ++ [(True, e) | e <- lst2]
                  ++ [undefined]
336

    
337
-- | Test list for the Utils module.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  , run prop_Utils_if'if
  , run prop_Utils_select
  , run prop_Utils_select_undefd
  , run prop_Utils_select_undefv
  ]
347

    
348
-- ** PeerMap tests

-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          -- normalise the arbitrary map to unique keys first
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

-- | List of tests for the PeerMap module.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
393

    
394
-- ** Container tests

-- | addTwo is commutative in its two pairs and idempotent.
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

-- | nameOf of a node's index returns that node's name.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  -- precondition: all generated names and aliases are distinct, and
  -- the probe name does not collide with any of them
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- overwrite each node's name/alias with the generated pair
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

-- | List of tests for the Container module.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
436

    
437
-- ** Instance tests

-- Simple instance tests, we only have setter/getters

-- | A freshly generated instance has its alias equal to its name.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

-- | setIdx is reflected by the idx getter.
prop_Instance_setIdx inst idx =
    Instance.idx updated == idx
    where updated = Instance.setIdx inst idx
          _types = (inst::Instance.Instance, idx::Types.Idx)

-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    Instance.name renamed == name && Instance.alias renamed == name
    where renamed = Instance.setName inst name
          _types = (inst::Instance.Instance, name::String)

-- | setAlias updates only the alias, leaving the name alone.
prop_Instance_setAlias inst name =
    Instance.name aliased == Instance.name inst &&
    Instance.alias aliased == name
    where aliased = Instance.setAlias inst name
          _types = (inst::Instance.Instance, name::String)

-- | setPri is reflected by the pNode getter.
prop_Instance_setPri inst pdx =
    Instance.pNode updated == pdx
    where updated = Instance.setPri inst pdx
          _types = (inst::Instance.Instance, pdx::Types.Ndx)

-- | setSec is reflected by the sNode getter.
prop_Instance_setSec inst sdx =
    Instance.sNode updated == sdx
    where updated = Instance.setSec inst sdx
          _types = (inst::Instance.Instance, sdx::Types.Ndx)

-- | setBoth updates primary and secondary at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode both == pdx && Instance.sNode both == sdx
    where both = Instance.setBoth inst pdx sdx
          _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
472

    
473
-- | An instance whose run status text is one of the running states
-- reports itself as running.
prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            ((`elem` Instance.runningStates) . Instance.runSt))
    Instance.running

-- | Conversely, a status text outside the running states means the
-- instance is not running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

-- | Shrinking by memory removes exactly one memory unit, when at
-- least two units are present.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

-- | Shrinking by memory fails below two memory units.
prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem}
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

-- | Shrinking by CPU removes exactly one CPU unit, when at least two
-- units are present.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

-- | Shrinking by CPU fails below two CPU units.
prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

-- | Shrinking by disk removes exactly one disk unit, when at least
-- two units are present.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

-- | Shrinking by disk fails below two disk units.
prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk

-- | setMovable is reflected by the movable getter.
prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m
523

    
524
-- | List of tests for the Instance module.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
542

    
543
-- ** Text backend tests

-- Instance text loader tests

-- | An instance serialized in the text format loads back with all
-- its fields intact, while loading with a duplicated
-- primary/secondary node must fail.
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal dt =
    pnode /= snode && pdx /= sdx ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- with an empty secondary name, only register the primary node
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        sdt = Types.dtToString dt
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, sdt, tags]
        -- FIX: the failure case must carry all 10 fields (the disk
        -- template was missing), so that it is rejected because of
        -- the duplicated primary/secondary node and not merely for
        -- having the wrong number of fields
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, sdt, tags]
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , snode::String
                 , autobal::Bool)
    in
      case inst of
        Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
                         False
        Types.Ok (_, i) -> printTestCase ("Mismatch in some field while\
                                          \ loading the instance") $
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            -- also check the disk round-trip (was previously untested)
            Instance.dsk i == dsk &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else sdx) &&
            Instance.autoBalance i == autobal &&
            Types.isBad fail1
586

    
587
-- | Loading an instance from anything but exactly 10 fields must
-- fail with the expected "invalid data" error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 10 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> printTestCase "Managed to load instance from invalid\
                                  \ data" False
      Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                       "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn
595

    
596
-- | A node serialized field-by-field loads back correctly; negative
-- inputs are rendered as "?" (missing value), which must mark the
-- node offline, as must an explicit offline flag.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        -- any "?" field means the node data is incomplete
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
625

    
626
-- | Loading a node from a wrong number of fields must fail.
-- NOTE(review): prop_Text_Load_Node above feeds loadNode 9 fields
-- (including the group UUID) and expects success, while this property
-- asserts failure for anything /= 8 — confirm which field counts
-- loadNode actually accepts.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

-- | serializeNode followed by loadNode is the identity (modulo the
-- failN1/offline flags, which loadNode sets to fixed defaults).
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }
635

    
636
-- | List of tests for the Text backend.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
643

    
644
-- ** Node tests

-- | setAlias changes only the alias, not the name.
prop_Node_setAlias node name =
    Node.name aliased == Node.name node &&
    Node.alias aliased == name
    where aliased = Node.setAlias node name
          _types = (node::Node.Node, name::String)

-- | setOffline is reflected by the offline getter.
prop_Node_setOffline node status =
    let toggled = Node.setOffline node status
    in Node.offline toggled == status

-- | setXmem is reflected by the xMem getter.
prop_Node_setXmem node xm =
    let updated = Node.setXmem node xm
    in Node.xMem updated == xm

-- | setMcpu is reflected by the mCpu getter.
prop_Node_setMcpu node mc =
    let updated = Node.setMcpu node mc
    in Node.mCpu updated == mc
663

    
664
-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          -- shrink everything to fit, then restore the oversized
          -- memory so that memory is the only failing resource
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Same as above, with disk as the only oversized resource.
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | Same as above, with the vcpus count pushed just past the node's
-- available CPUs.
prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
        case Node.addPri node inst'' of
          Types.OpFail Types.FailCPU -> True
          _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
703

    
704
-- | Checks for memory reservation changes: adding a secondary
-- auto-balanced instance raises rMem by exactly the instance memory,
-- a non-auto-balanced one leaves it unchanged, and removing either
-- restores the original value.
prop_Node_rMem inst =
    forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.autoBalance = False }
        -- now we have the two instances, identical except the
        -- autoBalance attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             printTestCase "Consistency checks failed" $
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)
         x -> printTestCase ("Failed to add/remove instances: " ++ show x)
              False
735

    
736
-- | Check mdsk setting: the resulting low-disk threshold and
-- available disk stay within sane bounds, and the ratio round-trips.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps

-- | Adding then removing the same tags yields the original map.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

-- | Any already-present tag is rejected on re-add.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

-- | All default fields have a known header and a known value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField
762

    
763

    
764
-- | computeGroups partitions the nodes without losing any, keeps
-- each group homogeneous, and produces distinct group UUIDs.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))

-- | List of tests for the Node module.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
788

    
789

    
790
-- ** Cluster tests
791

    
792
-- | Check that the cluster score is (close to) zero for a homogeneous
-- cluster built by replicating one healthy node.
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let peered = Node.buildPeers node Container.empty
        assoc = zip [1..] (replicate count peered) :: [(Types.Ndx, Node.Node)]
        score = Cluster.compCV (Container.fromList assoc)
    -- we can't require == 0 since floating point errors accumulate;
    -- the bound is far below the default minimal score in CLI.hs
    in score <= 1e-12
-- | Check that the aggregated cluster statistics are sane: allocatable
-- disk is non-negative and never exceeds the free disk.
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let peered = Node.buildPeers node Container.empty
        assoc = zip [1..] (replicate count peered) :: [(Types.Ndx, Node.Node)]
        cstats = Cluster.totalResources (Container.fromList assoc)
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats
-- | Check that a single instance is allocated correctly, i.e. the
-- allocation succeeds with exactly one solution and the resulting
-- cluster needs no further rebalancing.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        inst' = setInstanceSmallerThanNode node inst
        solution = Cluster.genAllocNodes defGroupList nl 2 True >>=
                   Cluster.tryAlloc nl Container.empty inst'
    in case solution of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               -- exactly one solution expected, and the new cluster
               -- must not be improvable by rebalancing
               [(xnl, xi, _, cv)] ->
                   let il' = Container.add (Instance.idx xi) xi
                             Container.empty
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True True False)
               _ -> False
-- | Check that on a 2-5 node cluster we can allocate a random instance
-- spec via tiered allocation (whatever the original instance spec), on
-- either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
        result = allocnodes >>= \an ->
                 Cluster.tieredAlloc nl il inst an [] []
    in case result of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) ->
             -- something was allocated, and the bookkeeping lists stay
             -- in sync with the instance container
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it (its secondary node can be replaced).
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               -- exactly one solution expected; the secondary node of
               -- the freshly-placed instance must then be evacuable
               [(xnl, xi, _, _)] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   -- isJust replaces the manual 'case … Just _ -> True'
                   in isJust (Cluster.tryEvac xnl il' [Instance.idx xi] [sdx])
               _ -> False
-- | Check that allocating multiple instances on a cluster and then
-- re-adding a withheld empty node results in a valid rebalance.
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        (spare, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
        result = allocnodes >>= \an ->
                 Cluster.iterateAlloc nl' il templ an [] []
    in case result of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
             -- putting the withheld node back must open up a rebalance
             let ynl = Container.add (Node.idx spare) spare xnl
                 tbl = Cluster.Table ynl il' (Cluster.compCV ynl) []
             in canBalance tbl True True False
-- | Check cluster consistency: instances spanning two node groups are
-- reported as split, single-group placements are not.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [n1, n2, n3] = Container.elems nl
      n3' = n3 { Node.group = 1 }
      nl' = Container.add (Node.idx n3') n3' nl
      sameGrp = Instance.setBoth inst (Node.idx n1) (Node.idx n2)
      noSec = Instance.setBoth inst (Node.idx n1) Node.noSecondary
      crossGrp = Instance.setBoth inst (Node.idx n1) (Node.idx n3)
      split is = Cluster.findSplitInstances nl' (Container.fromList is)
  in null (split [(0, sameGrp)]) &&
     null (split [(0, noSec)]) &&
     not (null (split [(0, crossGrp)]))
-- | For now we only check that splitting a cluster by node group loses
-- no instances and keeps each node in its own group's partition.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      place (ns, is) _ = assignInstance ns is inst 0 1
      (nl', il') = foldl place (nl, Container.empty) [1..icnt]
      groups = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) groups) == icnt &&
     all (\(guuid, (nl'', _)) ->
              all ((== guuid) . Node.group) (Container.elems nl'')) groups
-- | The cluster test suite.
testCluster =
    [ run prop_Score_Zero,
      run prop_CStats_sane,
      run prop_ClusterAlloc_sane,
      run prop_ClusterCanTieredAlloc,
      run prop_ClusterAllocEvac,
      run prop_ClusterAllocBalance,
      run prop_ClusterCheckConsistency,
      run prop_ClusterSplitCluster ]
-- ** OpCodes tests
944

    
945
-- | Check that opcode JSON serialization round-trips (deserialising
-- the serialised opcode yields the original).
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON (op :: OpCodes.OpCode)) of
    J.Error _ -> False
    J.Ok op' -> op' == op
-- | The opcodes test suite.
testOpCodes =
  [ run prop_OpCodes_serialization ]
-- ** Jobs tests
957

    
958
-- | Check that (queued) opcode status JSON serialization round-trips.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON (os :: Jobs.OpStatus)) of
    J.Error _ -> False
    J.Ok os' -> os' == os
-- | Check that job status JSON serialization round-trips.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON (js :: Jobs.JobStatus)) of
    J.Error _ -> False
    J.Ok js' -> js' == js
-- | The jobs test suite.
testJobs =
  [ run prop_OpStatus_serialization,
    run prop_JobStatus_serialization ]
-- ** Loader tests
977

    
978
-- | Check that node lookup behaves exactly like a plain map lookup.
prop_Loader_lookupNode ktn inst node =
  let nl = Data.Map.fromList ktn
  in Loader.lookupNode nl inst node == Data.Map.lookup node nl
-- | Check that instance lookup behaves exactly like a plain map lookup.
prop_Loader_lookupInstance kti inst =
  let il = Data.Map.fromList kti
  in Loader.lookupInstance il inst == Data.Map.lookup inst il
-- | Check that index assignment covers every node: the name map and
-- the container have one entry per node, and (for non-empty input)
-- the indices are exactly 0 .. length - 1.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  -- 'if c then e else True' simplified to the equivalent disjunction;
  -- 'maximum' is only reached when the key list is non-empty
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-- | Check that merging node data without any instances succeeds and
-- yields nodes with no recorded primary instances and an empty
-- instance container.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
      merged = Loader.mergeData [] [] [] []
               (Loader.emptyCluster {Loader.cdNodes = na})
  in case merged of
       Types.Bad _ -> False
       Types.Ok (Loader.ClusterData _ nl il _) ->
         all (null . Node.pList) (Container.elems nl) &&
         null (Container.elems il)
-- | Check that compareNameComponent on equal strings yields an exact
-- match.
prop_Loader_compareNameComponent_equal :: String -> Bool
prop_Loader_compareNameComponent_equal s =
  Loader.LookupResult Loader.ExactMatch s ==
    Loader.compareNameComponent s s
-- | Check that compareNameComponent on a dotted-prefix string yields a
-- partial match on the prefix.
prop_Loader_compareNameComponent_prefix :: NonEmptyList Char -> String -> Bool
prop_Loader_compareNameComponent_prefix (NonEmpty s1) s2 =
  Loader.LookupResult Loader.PartialMatch s1 ==
    Loader.compareNameComponent (s1 ++ "." ++ s2) s1
-- | The loader test suite.
testLoader =
  [ run prop_Loader_lookupNode,
    run prop_Loader_lookupInstance,
    run prop_Loader_assignIndices,
    run prop_Loader_mergeData,
    run prop_Loader_compareNameComponent_equal,
    run prop_Loader_compareNameComponent_prefix ]
-- ** Types tests
1029

    
1030
-- | Check that allocation-policy string serialisation round-trips.
prop_AllocPolicy_serialisation apol =
    case Types.apolFromString (Types.apolToString apol) of
      Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p)
                    (p == apol)
-- | Check that disk-template string serialisation round-trips.
prop_DiskTemplate_serialisation dt =
    case Types.dtFromString (Types.dtToString dt) of
      Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p)
                    (p == dt)
-- | The types test suite.
testTypes =
    [ run prop_AllocPolicy_serialisation,
      run prop_DiskTemplate_serialisation ]