Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / QC.hs @ d25643d1

History | View | Annotate | Download (34.4 kB)

1
{-| Unittests for ganeti-htools.
2

    
3
-}
4

    
5
{-
6

    
7
Copyright (C) 2009, 2010, 2011 Google Inc.
8

    
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
13

    
14
This program is distributed in the hope that it will be useful, but
15
WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
General Public License for more details.
18

    
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22
02110-1301, USA.
23

    
24
-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    , testTypes
38
    ) where
39

    
40
import Test.QuickCheck
41
import Data.List (findIndex, intercalate, nub, isPrefixOf)
42
import Data.Maybe
43
import Control.Monad
44
import qualified Text.JSON as J
45
import qualified Data.Map
46
import qualified Data.IntMap as IntMap
47
import qualified Ganeti.OpCodes as OpCodes
48
import qualified Ganeti.Jobs as Jobs
49
import qualified Ganeti.Luxi
50
import qualified Ganeti.HTools.CLI as CLI
51
import qualified Ganeti.HTools.Cluster as Cluster
52
import qualified Ganeti.HTools.Container as Container
53
import qualified Ganeti.HTools.ExtLoader
54
import qualified Ganeti.HTools.IAlloc as IAlloc
55
import qualified Ganeti.HTools.Instance as Instance
56
import qualified Ganeti.HTools.Loader as Loader
57
import qualified Ganeti.HTools.Luxi
58
import qualified Ganeti.HTools.Node as Node
59
import qualified Ganeti.HTools.Group as Group
60
import qualified Ganeti.HTools.PeerMap as PeerMap
61
import qualified Ganeti.HTools.Rapi
62
import qualified Ganeti.HTools.Simu
63
import qualified Ganeti.HTools.Text as Text
64
import qualified Ganeti.HTools.Types as Types
65
import qualified Ganeti.HTools.Utils as Utils
66
import qualified Ganeti.HTools.Version
67
import qualified Ganeti.Constants as C
68

    
69
-- | Run a single QuickCheck property with the supplied arguments,
-- returning the raw QuickCheck result.
run :: Testable prop => prop -> Args -> IO Result
run prop args = quickCheckWithResult args prop
71

    
72
-- * Constants

-- | Maximum memory (1TiB, somewhat random value).
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 8 * 1024 * 1024

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024
85

    
86
-- | The default node group used throughout the tests.
defGroup :: Group.Group
defGroup = Group.setIdx (Group.create "default" Utils.defaultGroupID
                         Types.AllocPreferred) 0

-- | A singleton group list containing only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Association from the default group's UUID to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
96

    
97
-- * Helper functions

-- | Simple checker for whether an OpResult is fail or pass.
isFailure :: Types.OpResult a -> Bool
isFailure r = case r of
                Types.OpFail _ -> True
                _              -> False
103

    
104
-- | Update an instance so that it is guaranteed to fit on the given
-- node: memory, disk and vcpus are each set to half of the node's
-- available amount.
setInstanceSmallerThanNode :: Node.Node -> Instance.Instance
                           -> Instance.Instance
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }
110

    
111
-- | Create a running, auto-balanced DRBD8 instance with the given
-- memory, disk and vcpu spec, not yet assigned to any node.
createInstance :: Int -> Int -> Int -> Instance.Instance
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
                    Types.DTDrbd8
115

    
116
-- | Create a small cluster by repeating a node spec @count@ times.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let peered = Node.buildPeers node Container.empty
        assoc  = map (\n -> (Node.name n, n)) (replicate count peered)
    in snd (Loader.assignIndices assoc)
123

    
124
-- | Checks if a node is "big" enough, i.e. has more than @size@
-- units of disk, memory and cpu available.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    Node.availDisk node > size * Types.unitDsk &&
    Node.availMem node > size * Types.unitMem &&
    Node.availCpu node > size * Types.unitCpu

-- | Whether a balancing move exists for the given table and the
-- given disk-move/instance-move/evacuation options.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust (Cluster.tryBalance tbl dm im evac 0 0)
132

    
133
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let -- next free instance index: one past the current maximum
      new_idx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = new_idx
                   , Instance.pNode = pdx
                   , Instance.sNode = sdx }
      pri = Node.setPri (Container.find pdx nl) inst'
      sec = Node.setSec (Container.find sdx nl) inst'
  in ( Container.addTwo pdx pri sdx sec nl
     , Container.add new_idx inst' il )
151

    
152
-- * Arbitrary instances

-- | A single character valid inside a DNS name.
newtype DNSChar = DNSChar { dnsGetChar :: Char }

instance Arbitrary DNSChar where
    arbitrary =
        liftM DNSChar (elements (['a'..'z'] ++ ['0'..'9'] ++ "_-"))
161

    
162
-- | Generate one DNS name component of 1-64 valid characters.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len :: Gen [DNSChar]
  return (map dnsGetChar chars)
167

    
168

    
169
-- | Generate a fully-qualified name: a leading component followed by
-- 1-4 further dot-separated components.
getFQDN :: Gen String
getFQDN = do
  first <- getName
  ncomps <- choose (1, 4)
  comps <- vector ncomps :: Gen [[DNSChar]]
  return (first ++ "." ++ intercalate "." (map (map dnsGetChar) comps))
176

    
177
-- | Generate a random instance: random name, sizes within the test
-- bounds, and a run state drawn from valid and invalid values alike.
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      -- deliberately includes two invalid statuses
      status <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pnode <- arbitrary
      snode <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus status [] True pnode snode
                               Types.DTDrbd8
196

    
197
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let (lo_mem, lo_dsk, lo_cpu) =
          maybe (0, 0, 0)
                (\mm -> (mm * Types.unitMem,
                         mm * Types.unitDsk,
                         mm * Types.unitCpu))
                min_multiplier
      (hi_mem, hi_dsk, hi_cpu) =
          maybe (maxMem, maxDsk, maxCpu)
                (\mm -> (mm * Types.unitMem,
                         mm * Types.unitDsk,
                         mm * Types.unitCpu))
                max_multiplier
  name  <- getFQDN
  mem_t <- choose (lo_mem, hi_mem)
  mem_f <- choose (lo_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (lo_dsk, hi_dsk)
  dsk_f <- choose (lo_dsk, dsk_t)
  cpu_t <- choose (lo_cpu, hi_cpu)
  offl  <- arbitrary
  let node = Node.create name (fromIntegral mem_t) mem_n mem_f
             (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return (Node.buildPeers node Container.empty)
226

    
227
-- | A completely unconstrained random node.
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing

-- | A random replace-disks mode.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]
238

    
239
-- | A random opcode, dispatching on a randomly chosen opcode id.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    case op_id of
      "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
      "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
                 arbitrary arbitrary arbitrary
      "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
      "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
                 arbitrary
      _ -> fail "Wrong opcode"
258

    
259
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- | A ratio, generated in the [0, 1] interval.
newtype SmallRatio = SmallRatio Double deriving Show

instance Arbitrary SmallRatio where
    arbitrary = liftM SmallRatio (choose (0, 1))

instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]
276

    
277
-- * Actual tests

-- ** Utils tests

-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat`
            (\l -> l /= [""] && all (notElem ',') l)) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin s =
    Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | fromObjWithDefault, tested via the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
    let _types = def_value :: Integer
    in
      -- a missing key is filled in with the default ...
      Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
      -- ... while a present key is returned as-is, not the default
      Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
           random_key (def_value + 1) == Just def_value
300

    
301
-- | The list of QuickCheck runners for the Utils module.
testUtils =
    [ run prop_Utils_commaJoinSplit
    , run prop_Utils_commaSplitJoin
    , run prop_Utils_fromObjWithDefault
    ]
307

    
308
-- ** PeerMap tests

-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    let _types = (pmap :: PeerMap.PeerMap,
                  key :: PeerMap.Key, em :: PeerMap.Elem)
        addOne = PeerMap.add key em
        puniq  = PeerMap.accumArray const pmap
    in addOne puniq == addOne (addOne puniq)

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    let _types = (pmap :: PeerMap.PeerMap, key :: PeerMap.Key)
        delOne = PeerMap.remove key
        puniq  = PeerMap.accumArray const pmap
    in delOne puniq == delOne (delOne puniq)

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
    let _types = (pmap :: PeerMap.PeerMap, key :: PeerMap.Key)
        puniq  = PeerMap.accumArray const pmap
    in PeerMap.find key (PeerMap.remove key puniq) == 0

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
    let _types = (pmap :: PeerMap.PeerMap,
                  key :: PeerMap.Key, em :: PeerMap.Elem)
        puniq  = PeerMap.accumArray const pmap
    in PeerMap.find key (PeerMap.add key em puniq) == em

-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
    let _types = pmap :: PeerMap.PeerMap
        puniq  = PeerMap.accumArray const pmap
    in PeerMap.maxElem puniq ==
       if null puniq then 0 else (maximum . snd . unzip) puniq
344

    
345
-- | List of tests for the PeerMap module.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
353

    
354
-- ** Container tests

-- | addTwo is commutative in its pairs and idempotent.
prop_Container_addTwo cdata i1 i2 =
    let _types = (cdata :: [Int], i1 :: Int, i2 :: Int)
        cont = foldl (\c x -> Container.add x x c) Container.empty cdata
        addPair x1 x2 = Container.addTwo x1 x1 x2 x2
    in addPair i1 i2 cont == addPair i2 i1 cont &&
       addPair i1 i2 cont == addPair i1 i2 (addPair i1 i2 cont)

-- | nameOf returns the node's own name for its index.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \cnt ->
  forAll (choose (0, cnt - 1)) $ \fidx ->
  forAll (vector cnt) $ \names ->
  (length . nub) (map fst names ++ map snd names) == length names * 2 &&
  othername `notElem` (map fst names ++ map snd names) ==>
  let nl = makeSmallCluster node cnt
      renamed = map (\((name, alias), nn) ->
                       (Node.idx nn, nn { Node.name = name
                                        , Node.alias = alias }))
                (zip names (Container.elems nl))
      nl' = Container.fromList renamed
      target = snd (renamed !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing
390

    
391
-- | List of tests for the Container module.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
396

    
397
-- ** Instance tests

-- Simple instance tests, we only have setter/getters

-- | A freshly created instance uses its name as its alias.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

-- | setIdx stores the given index.
prop_Instance_setIdx inst idx =
    let _types = (inst :: Instance.Instance, idx :: Types.Idx)
    in Instance.idx (Instance.setIdx inst idx) == idx

-- | setName updates both name and alias.
prop_Instance_setName inst name =
    let _types = (inst :: Instance.Instance, name :: String)
        renamed = Instance.setName inst name
    in Instance.name renamed == name &&
       Instance.alias renamed == name

-- | setAlias changes only the alias, not the name.
prop_Instance_setAlias inst name =
    let _types = (inst :: Instance.Instance, name :: String)
        aliased = Instance.setAlias inst name
    in Instance.name aliased == Instance.name inst &&
       Instance.alias aliased == name

-- | setPri stores the primary node index.
prop_Instance_setPri inst pdx =
    let _types = (inst :: Instance.Instance, pdx :: Types.Ndx)
    in Instance.pNode (Instance.setPri inst pdx) == pdx

-- | setSec stores the secondary node index.
prop_Instance_setSec inst sdx =
    let _types = (inst :: Instance.Instance, sdx :: Types.Ndx)
    in Instance.sNode (Instance.setSec inst sdx) == sdx

-- | setBoth stores both node indices.
prop_Instance_setBoth inst pdx sdx =
    let _types = (inst :: Instance.Instance,
                  pdx :: Types.Ndx, sdx :: Types.Ndx)
        moved = Instance.setBoth inst pdx sdx
    in Instance.pNode moved == pdx && Instance.sNode moved == sdx

-- | Instances whose run state is a running state report running.
prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            (\i -> Instance.runSt i `elem` Instance.runningStates))
    Instance.running

-- | Instances whose run state is not a running state do not report
-- running.
prop_Instance_runStatus_False inst =
    let status = Instance.runSt inst
    in status `notElem` Instance.runningStates ==>
       not (Instance.running inst)
443

    
444
-- | Shrinking memory on a big-enough instance removes one memory unit.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok shrunk ->
              Instance.mem shrunk == Instance.mem inst - Types.unitMem
          _ -> False

-- | Shrinking memory below two units must fail.
prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    Types.isBad (Instance.shrinkByType (inst { Instance.mem = mem })
                 Types.FailMem)

-- | Shrinking cpus on a big-enough instance removes one cpu unit.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok shrunk ->
              Instance.vcpus shrunk == Instance.vcpus inst - Types.unitCpu
          _ -> False

-- | Shrinking cpus below two units must fail.
prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    Types.isBad (Instance.shrinkByType (inst { Instance.vcpus = vcpus })
                 Types.FailCPU)

-- | Shrinking disk on a big-enough instance removes one disk unit.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok shrunk ->
              Instance.dsk shrunk == Instance.dsk inst - Types.unitDsk
          _ -> False

-- | Shrinking disk below two units must fail.
prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    Types.isBad (Instance.shrinkByType (inst { Instance.dsk = dsk })
                 Types.FailDisk)

-- | setMovable stores the movable flag.
prop_Instance_setMovable inst m =
    Instance.movable (Instance.setMovable inst m) == m
483

    
484
-- | List of tests for the Instance module.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
502

    
503
-- ** Text backend tests

-- Instance text loader tests

-- | Loading a serialized instance round-trips its fields, and
-- loading one with identical primary and secondary nodes fails.
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal =
    pnode /= snode && pdx /= sdx ==>
    let _types = ( name :: String, mem :: Int, dsk :: Int
                 , vcpus :: Int, status :: String
                 , snode :: String
                 , autobal :: Bool)
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        -- build the 9 text-backend fields with the given secondary
        mkFields sec = [name, show mem, show dsk, show vcpus, status,
                        sbal, pnode, sec, tags]
        inst = Text.loadInst nl (mkFields snode)
               :: Maybe (String, Instance.Instance)
        -- same primary and secondary must be rejected
        fail1 = Text.loadInst nl (mkFields pnode)
               :: Maybe (String, Instance.Instance)
    in case inst of
         Nothing -> False
         Just (_, i) ->
             Instance.name i == name &&
             Instance.vcpus i == vcpus &&
             Instance.mem i == mem &&
             Instance.pNode i == pdx &&
             Instance.sNode i == (if null snode
                                  then Node.noSecondary
                                  else sdx) &&
             Instance.autoBalance i == autobal &&
             isNothing fail1
543

    
544
-- | Loading an instance from the wrong number of fields fails with
-- the expected error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst (Data.Map.fromList ktn) fields of
      Types.Ok _ -> False
      Types.Bad msg ->
          "Invalid/incomplete instance data: '" `isPrefixOf` msg
550

    
551
-- | Loading a node from text fields round-trips its resources;
-- negative ("?") values or the offline flag mark the node offline.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0 then "?" else show v
        fields = [ name, conv tm, conv nm, conv fm
                 , conv td, conv fd, conv tc
                 , if fo then "Y" else "N"
                 , Group.uuid defGroup ]
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
    in case Text.loadNode defGroupAssoc fields of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
580

    
581
-- | Loading a node from the wrong number of fields fails.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing (Text.loadNode Data.Map.empty fields)

-- | Serializing then loading a node is the identity (modulo the
-- failN1 flag, which loadNode sets by default).
prop_Text_NodeLSIdempotent node =
    let n = node { Node.failN1 = True, Node.offline = False }
    in (Text.loadNode defGroupAssoc .
        Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
       Just (Node.name n, n)
590

    
591
-- | List of tests for the Text backend.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
598

    
599
-- ** Node tests

-- | setAlias changes only the alias, not the name.
prop_Node_setAlias node name =
    let _types = (node :: Node.Node, name :: String)
        aliased = Node.setAlias node name
    in Node.name aliased == Node.name node &&
       Node.alias aliased == name

-- | setOffline stores the offline flag.
prop_Node_setOffline node status =
    Node.offline (Node.setOffline node status) == status

-- | setXmem stores the excluded-memory value.
prop_Node_setXmem node xm =
    Node.xMem (Node.setXmem node xm) == xm

-- | setMcpu stores the max-cpu ratio.
prop_Node_setMcpu node mc =
    Node.mCpu (Node.setMcpu node mc) == mc
618

    
619
-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM node inst =
    Instance.mem inst >= Node.fMem node && not (Node.failN1 node) ==>
    case Node.addPri node big_mem of
      Types.OpFail Types.FailMem -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          shrunk = setInstanceSmallerThanNode node inst
          -- restore the (too large) original memory
          big_mem = shrunk { Instance.mem = Instance.mem inst }

-- | Adding a primary with too much disk must fail with FailDisk.
prop_Node_addPriFD node inst =
    Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
    case Node.addPri node big_dsk of
      Types.OpFail Types.FailDisk -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          shrunk = setInstanceSmallerThanNode node inst
          -- restore the (too large) original disk
          big_dsk = shrunk { Instance.dsk = Instance.dsk inst }

-- | Adding a primary with too many vcpus must fail with FailCPU.
prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
    case Node.addPri node big_cpu of
      Types.OpFail Types.FailCPU -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          shrunk = setInstanceSmallerThanNode node inst
          -- demand strictly more cpus than available
          big_cpu = shrunk { Instance.vcpus = Node.availCpu node + extra }
649

    
650
-- | Check that a secondary add with too high memory or disk will be
-- rejected.
prop_Node_addSec node inst pdx =
    let _types = (node :: Node.Node, inst :: Instance.Instance, pdx :: Int)
    in (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
        Instance.dsk inst >= Node.fDsk node) &&
       not (Node.failN1 node)
       ==> isFailure (Node.addSec node inst pdx)
658

    
659
-- | Checks for memory reservation changes.
prop_Node_rMem inst =
    forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.autoBalance = False }
        -- the two instances are now identical except for autoBalance
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
        node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             printTestCase "Consistency checks failed" $
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             inst_idx `notElem` Node.sList d_ab
         x -> printTestCase ("Failed to add/remove instances: " ++ show x)
              False
690

    
691
-- | Check mdsk setting.
prop_Node_setMdsk node mx =
    let _types = (node :: Node.Node, mx :: SmallRatio)
        SmallRatio ratio = mx
        node' = Node.setMdsk node ratio
    in Node.loDsk node' >= 0 &&
       fromIntegral (Node.loDsk node') <= Node.tDsk node &&
       Node.availDisk node' >= 0 &&
       Node.availDisk node' <= Node.fDsk node' &&
       fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
       Node.mDsk node' == ratio

-- Check tag maps

-- | Adding then deleting the same tags returns the empty map.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags Data.Map.empty tags) tags == Data.Map.empty

-- | Re-adding an existing tag is rejected.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    let m = Node.addTags Data.Map.empty tags
    in any (\t -> Node.rejectAddTags m [t]) tags

-- | All default fields are known to showHeader/showField.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField
717

    
718

    
719
-- | computeGroups partitions the nodes by group, with unique group
-- uuids and no node lost.
prop_Node_computeGroups nodes =
  let grouped = Node.computeGroups nodes
      uuids = map fst grouped
  in length nodes == sum (map (length . snd) grouped) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) grouped &&
     length (nub uuids) == length uuids &&
     (null nodes || not (null grouped))
726

    
727
-- | List of tests for the Node module.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
743

    
744

    
745
-- ** Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let peered = Node.buildPeers node Container.empty
        assoc = zip [1..] $ replicate count peered
                :: [(Types.Ndx, Node.Node)]
        score = Cluster.compCV (Container.fromList assoc)
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-12
760

    
761
-- | Check that cluster stats are sane.
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let peered = Node.buildPeers node Container.empty
        assoc = zip [1..] $ replicate count peered
                :: [(Types.Ndx, Node.Node)]
        cstats = Cluster.totalResources (Container.fromList assoc)
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats
772

    
773
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        small = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
            Cluster.tryAlloc nl il small of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               -- exactly one solution, and the resulting cluster must
               -- not be improvable by balancing
               [(xnl, xi, _, cv)] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True True False)
               _ -> False
796

    
797
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     isNodeBig node 4) ==>
    let nl = makeSmallCluster node count
        tiered = Cluster.genAllocNodes defGroupList nl rqnodes True >>=
                 \anodes -> Cluster.tieredAlloc nl Container.empty inst
                            anodes [] []
    in case tiered of
         Types.Bad _ -> False
         -- at least one allocation succeeded, and the bookkeeping
         -- lists (instances, indices, stats) stay in step
         Types.Ok (_, _, il', ixes, cstats) ->
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     isNodeBig node 4) ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
        alloc = Cluster.genAllocNodes defGroupList nl 2 True >>=
                Cluster.tryAlloc nl il inst'
    in case alloc of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               -- evacuating the freshly-placed instance away from its
               -- secondary node must succeed
               [(xnl, xi, _, _)] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in isJust (Cluster.tryEvac xnl il' [Instance.idx xi] [sdx])
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        -- hold one node back, fill the rest via repeated allocation
        (hnode, nl') = IntMap.deleteFindMax nl
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
        filled = Cluster.genAllocNodes defGroupList nl' 2 True >>=
                 \anodes -> Cluster.iterateAlloc nl' Container.empty
                            i_templ anodes [] []
    in case filled of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
             -- re-adding the empty node must open up balancing moves
             let ynl = Container.add (Node.idx hnode) hnode xnl
                 tbl = Cluster.Table ynl il' (Cluster.compCV ynl) []
             in canBalance tbl True True False

-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      -- move the third node into a different group
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      place = Instance.setBoth inst (Node.idx node1)
      ccheck i = Cluster.findSplitInstances nl' (Container.fromList [(0, i)])
  -- same-group and single-homed instances are not split; an instance
  -- spanning node groups is
  in null (ccheck (place (Node.idx node2))) &&
     null (ccheck (place Node.noSecondary)) &&
     not (null (ccheck (place (Node.idx node3))))

-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
      -- every node in a split component belongs to that component's group
      homogeneous (guuid, (nl'', _)) =
          all ((== guuid) . Node.group) (Container.elems nl'')
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all homogeneous gni

-- | The list of cluster-level QuickCheck properties.
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

-- ** OpCodes tests

-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    -- round-tripping through JSON must give back the same opcode
    J.Ok decoded -> decoded == (op::OpCodes.OpCode)

-- | The list of OpCodes serialization properties.
testOpCodes =
  [ run prop_OpCodes_serialization
  ]

-- ** Jobs tests

-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok decoded -> decoded == (os::Jobs.OpStatus)

-- | Check that job status serialization is idempotent.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok decoded -> decoded == (js::Jobs.JobStatus)

-- | The list of job-status serialization properties.
testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

-- ** Loader tests

-- | Looking up a node must agree with a plain map lookup.
prop_Loader_lookupNode ktn inst node =
  let nl = Data.Map.fromList ktn
  in Loader.lookupNode nl inst node == Data.Map.lookup node nl

-- | Looking up an instance must agree with a plain map lookup.
prop_Loader_lookupInstance kti inst =
  let il = Data.Map.fromList kti
  in Loader.lookupInstance il inst == Data.Map.lookup inst il

-- | Check that assigning indices to a node list preserves the item
-- count and produces a dense 0..n-1 index space.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  -- 'null nodes ||' guards the partial 'maximum' on the empty list;
  -- this replaces the redundant 'if ... then ... else True' form
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
      merged = Loader.mergeData [] [] [] []
               (Loader.emptyCluster {Loader.cdNodes = na})
  in case merged of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      -- with no instances fed in, every node's primary list and the
      -- resulting instance container must be empty
      all (null . Node.pList) (Container.elems nl) &&
      null (Container.elems il)

-- | The list of loader properties.
testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]

-- ** Types tests

-- | Check that allocation-policy string serialisation round-trips.
prop_AllocPolicy_serialisation apol =
    case Types.apolFromString (Types.apolToString apol) of
      Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
                    p == apol

-- | Check that disk-template string serialisation round-trips.
prop_DiskTemplate_serialisation dt =
    case Types.dtFromString (Types.dtToString dt) of
      Types.Bad s -> printTestCase ("failed to deserialise: " ++ s) False
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
                    p == dt

-- | The list of Types serialisation properties.
testTypes =
    [ run prop_AllocPolicy_serialisation
    , run prop_DiskTemplate_serialisation
    ]