{-| Unittests for ganeti-htools.

-}

{-

Copyright (C) 2009, 2010, 2011 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.QC
    ( testUtils
    , testPeerMap
    , testContainer
    , testInstance
    , testNode
    , testText
    , testOpCodes
    , testJobs
    , testCluster
    , testLoader
    , testTypes
    ) where

import Test.QuickCheck
import Data.List (findIndex, intercalate, nub, isPrefixOf)
import Data.Maybe
import Control.Monad
import qualified Text.JSON as J
import qualified Data.Map
import qualified Data.IntMap as IntMap
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Jobs as Jobs
import qualified Ganeti.Luxi
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.PeerMap as PeerMap
import qualified Ganeti.HTools.Rapi
import qualified Ganeti.HTools.Simu
import qualified Ganeti.HTools.Text as Text
import qualified Ganeti.HTools.Types as Types
import qualified Ganeti.HTools.Utils as Utils
import qualified Ganeti.HTools.Version
import qualified Ganeti.Constants as C

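-- | Runs a test property with the given QuickCheck arguments
-- ('quickCheckWithResult' with its arguments flipped).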
run :: Testable prop => prop -> Args -> IO Result
run = flip quickCheckWithResult

-- * Constants

-- | Maximum memory (1TiB, somewhat random value).
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024

defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
               Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred

defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- * Helper functions

-- | Simple checker for whether OpResult is fail or pass.
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec.
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
                    Types.DTDrbd8

-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
    in nlst

-- | Checks if a node is "big" enough.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu

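-- | Checks whether a balancing move is still possible on the given
-- cluster table, i.e. whether 'Cluster.tryBalance' finds a solution.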
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0

-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')

-- * Arbitrary instances

-- | Defines a single valid character of a DNS name.
newtype DNSChar = DNSChar { dnsGetChar::Char }

instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)

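-- | Generates a single DNS name component of 1 to 64 allowed characters.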
getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)


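-- | Generates a fully-qualified name: a leading component followed by
-- one to four more dot-separated components.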
getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')

-- let's generate a random instance
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn
                               Types.DTDrbd8

-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let (base_mem, base_dsk, base_cpu) =
          case min_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (0, 0, 0)
      (top_mem, top_dsk, top_cpu)  =
          case max_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (maxMem, maxDsk, maxCpu)
  name  <- getFQDN
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers n Container.empty

-- and a random node
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing

-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
          arbitrary
        _ -> fail "Wrong opcode")

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]

-- * Actual tests

-- ** Utils tests

-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should give back the original list.
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat`
            (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- | Split followed by join should always give back the original string.
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key will be returned with the default
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- a found key will be returned as is, not with default
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
         random_key (def_value+1) == Just def_value
        where _types = def_value :: Integer

-- | Test that functional if' behaves like the syntactic sugar if.
prop_Utils_if'if :: Bool -> Int -> Int -> Bool
prop_Utils_if'if cnd a b = Utils.if' cnd a b == if cnd then a else b

-- | Test select
prop_Utils_select :: Int   -- ^ Default result
                  -> [Int] -- ^ List of False values
                  -> [Int] -- ^ List of True values
                  -> Bool  -- ^ Try undef result (if a true value exists)
                  -> Bool  -- ^ Try undef true value (if a true value exists)
                  -> Bool  -- ^ Try undef false value (if a true value exists)
                  -> Bool  -- ^ Test result
prop_Utils_select di lst1 lst2 rundefd rundeft rundeff =
  Utils.select def cndlist == expectedresult
  where has_nondef_result = not (null lst2)
        try_undefd = has_nondef_result && rundefd
        try_undeft = has_nondef_result && rundeft
        try_undeff = has_nondef_result && rundeff
        def = Utils.if' try_undefd undefined di
        utl = Utils.if' try_undeft [(True, undefined)] []
        ufl = Utils.if' try_undeff [(False, undefined)] []
        expectedresult = Utils.if' has_nondef_result (head lst2) def
        flist = map (\e -> (False, e)) lst1
        tlist = map (\e -> (True, e)) lst2
        cndlist = flist ++ tlist ++ utl ++ ufl

-- | Test list for the Utils module.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  , run prop_Utils_if'if
  , run prop_Utils_select
  ]

-- ** PeerMap tests

-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem indeed returns the maximum element, or 0 for an empty map.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

-- | List of tests for the PeerMap module.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]

-- ** Container tests

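-- | Check that 'Container.addTwo' is insensitive to the order of the
-- two keys and is idempotent.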
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

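-- | Check that 'Container.nameOf' returns the name of the node stored
-- at the given index.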
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

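-- | List of tests for the Container module.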
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]

-- ** Instance tests

-- Simple instance tests, we only have setter/getters

prop_Instance_create inst =
    Instance.name inst == Instance.alias inst

prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx

prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            ((`elem` Instance.runningStates) . Instance.runSt))
    Instance.running

prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem}
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk

prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

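-- | List of tests for the Instance module.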
testInstance =
    [ run prop_Instance_create
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]

-- ** Text backend tests

-- Instance text loader tests

prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal dt =
    pnode /= snode && pdx /= sdx ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        sdt = Types.dtToString dt
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, sdt, tags]
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , snode::String
                 , autobal::Bool)
    in
      case inst of
        Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
                         False
        Types.Ok (_, i) -> printTestCase ("Mismatch in some field while\
                                          \ loading the instance") $
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else sdx) &&
            Instance.autoBalance i == autobal &&
            Types.isBad fail1

prop_Text_Load_InstanceFail ktn fields =
    length fields /= 10 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> printTestCase "Managed to load instance from invalid\
                                  \ data" False
      Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                       "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn

prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }

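-- | List of tests for the Text backend.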
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]

-- ** Node tests

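-- | Check that setting a node's alias changes the alias and leaves the
-- name untouched.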
prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
        case Node.addPri node inst'' of
          Types.OpFail Types.FailCPU -> True
          _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

-- | Checks for memory reservation changes.
prop_Node_rMem inst =
    forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.autoBalance = False }
        -- now we have the two instances, identical except the
        -- autoBalance attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             printTestCase "Consistency checks failed" $
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)
         x -> printTestCase ("Failed to add/remove instances: " ++ show x)
              False

-- | Check mdsk setting.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField


prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))

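-- | List of tests for the Node module.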
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]


-- ** Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-12

-- | Check that cluster stats are sane.
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True True False)
               _ -> False

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
        Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                      IntMap.size il' == length ixes &&
                                      length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, _):[] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
        Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   in canBalance tbl True True False

-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni

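-- | List of tests for the Cluster module.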
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

-- ** OpCodes tests

-- | Check that opcode serialization round-trips (deserialising a
-- serialised opcode gives back the same opcode).
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

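-- | List of tests for the OpCodes module.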
testOpCodes =
  [ run prop_OpCodes_serialization
  ]

-- ** Jobs tests

-- | Check that (queued) job\/opcode status serialization round-trips.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

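-- | List of tests for the Jobs module.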
testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

-- ** Loader tests

prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti

prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  (if not (null nodes)
   then maximum (IntMap.keys kt) == length nodes - 1
   else True)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

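-- | List of tests for the Loader module.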
testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]

-- ** Types tests

prop_AllocPolicy_serialisation apol =
    case Types.apolFromString (Types.apolToString apol) of
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
                    p == apol
      Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False

prop_DiskTemplate_serialisation dt =
    case Types.dtFromString (Types.dtToString dt) of
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
                    p == dt
      Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False

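-- | List of tests for the Types module.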
testTypes =
    [ run prop_AllocPolicy_serialisation
    , run prop_DiskTemplate_serialisation
    ]