Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / QC.hs @ 6429e8d8

History | View | Annotate | Download (34.8 kB)

1
{-| Unittests for ganeti-htools.
2

    
3
-}
4

    
5
{-
6

    
7
Copyright (C) 2009, 2010, 2011 Google Inc.
8

    
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
13

    
14
This program is distributed in the hope that it will be useful, but
15
WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
General Public License for more details.
18

    
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22
02110-1301, USA.
23

    
24
-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    , testTypes
38
    ) where
39

    
40
import Test.QuickCheck
41
import Data.List (findIndex, intercalate, nub, isPrefixOf)
42
import Data.Maybe
43
import Control.Monad
44
import qualified Text.JSON as J
45
import qualified Data.Map
46
import qualified Data.IntMap as IntMap
47
import qualified Ganeti.OpCodes as OpCodes
48
import qualified Ganeti.Jobs as Jobs
49
import qualified Ganeti.Luxi
50
import qualified Ganeti.HTools.CLI as CLI
51
import qualified Ganeti.HTools.Cluster as Cluster
52
import qualified Ganeti.HTools.Container as Container
53
import qualified Ganeti.HTools.ExtLoader
54
import qualified Ganeti.HTools.IAlloc as IAlloc
55
import qualified Ganeti.HTools.Instance as Instance
56
import qualified Ganeti.HTools.Loader as Loader
57
import qualified Ganeti.HTools.Luxi
58
import qualified Ganeti.HTools.Node as Node
59
import qualified Ganeti.HTools.Group as Group
60
import qualified Ganeti.HTools.PeerMap as PeerMap
61
import qualified Ganeti.HTools.Rapi
62
import qualified Ganeti.HTools.Simu
63
import qualified Ganeti.HTools.Text as Text
64
import qualified Ganeti.HTools.Types as Types
65
import qualified Ganeti.HTools.Utils as Utils
66
import qualified Ganeti.HTools.Version
67
import qualified Ganeti.Constants as C
68

    
69
-- | Run a QuickCheck property with the supplied arguments, yielding
-- the detailed 'Result'.
run :: Testable prop => prop -> Args -> IO Result
run prop args = quickCheckWithResult args prop
71

    
72
-- * Constants

-- | Maximum memory (1TiB, somewhat random value).
-- Presumably expressed in MiB (1024 * 1024 MiB == 1 TiB) -- TODO
-- confirm against Node.create's units.
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value).
-- Same presumed MiB unit as 'maxMem'.
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024
85

    
86
-- | The default node group used by the tests, with index 0.
defGroup :: Group.Group
defGroup = Group.setIdx
               (Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred)
               0

-- | A group container holding only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | UUID-to-index association for 'defGroup'.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
96

    
97
-- * Helper functions

-- | Simple checker for whether an 'OpResult' is a failure.
isFailure :: Types.OpResult a -> Bool
isFailure res =
    case res of
      Types.OpFail _ -> True
      _ -> False
103

    
104
-- | Update an instance to be smaller than a node.
--
-- The instance gets half of the node's available memory, disk and
-- CPU, so it is guaranteed to fit on that node.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }
110

    
111
-- | Create a running, auto-balanced DRBD8 instance given its spec
-- (memory, disk, vcpus); node indices are left unassigned (-1).
createInstance m d c =
    Instance.create "inst-unnamed" m d c "running" [] True (-1) (-1)
                    Types.DTDrbd8
115

    
116
-- | Create a small cluster by repeating a node spec @count@ times.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let base = Node.buildPeers node Container.empty
        pairs = [ (Node.name n, n) | n <- replicate count base ]
    in snd (Loader.assignIndices pairs)
123

    
124
-- | Checks if a node is "big" enough: at least @size@ allocation
-- units of disk, memory and CPU available.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]

-- | Whether a single balancing round can still improve the table.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac =
    isJust (Cluster.tryBalance tbl dm im evac 0 0)
132

    
133
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      -- next free instance index: one past the current maximum, or 0
      -- for an empty instance list
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      -- register the instance on its primary and secondary nodes,
      -- then store the updated nodes and instance back
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
151

    
152
-- * Arbitrary instances

-- | Defines a DNS name.
newtype DNSChar = DNSChar { dnsGetChar::Char }

-- | Uniformly pick one character valid in a DNS label.
instance Arbitrary DNSChar where
    arbitrary =
      fmap DNSChar (elements (['a'..'z'] ++ ['0'..'9'] ++ "_-"))
161

    
162
-- | Generate a single DNS name component, 1-64 characters long.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len::Gen [DNSChar]
  return (map dnsGetChar chars)

-- | Generate a fully-qualified name: one leading component followed
-- by 1-4 dot-separated extra components.
getFQDN :: Gen String
getFQDN = do
  leading <- getName
  ncomps <- choose (1, 4)
  comps <- vector ncomps::Gen [[DNSChar]]
  return . intercalate "." $ leading : map (map dnsGetChar) comps
176

    
177
-- let's generate a random instance
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      -- the list deliberately contains two invalid run states, so
      -- properties also see instances with unknown statuses
      run_st <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn
                               Types.DTDrbd8
196

    
197
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let (base_mem, base_dsk, base_cpu) =
          case min_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (0, 0, 0)
      (top_mem, top_dsk, top_cpu)  =
          case max_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (maxMem, maxDsk, maxCpu)
  name  <- getFQDN
  -- free + node-reserved memory are chosen so they never exceed the
  -- total memory picked above
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers n Container.empty

-- and a random node
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing
230

    
231
-- replace disks
-- | Uniform choice over all four replace-disks modes.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]
238

    
239
-- | Generate one of four opcode variants by first picking the opcode
-- id, then dispatching to the matching constructor.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    -- the catch-all 'fail' is unreachable as long as the two lists
    -- (ids above, cases below) stay in sync
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
          arbitrary
        _ -> fail "Wrong opcode")
258

    
259
-- | Uniform choice over the whole OpStatus enumeration.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

-- | Uniform choice over the whole JobStatus enumeration.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]
264

    
265
-- | A ratio in the closed interval [0, 1].
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = fmap SmallRatio (choose (0, 1))
270

    
271
-- | Uniform choice over all allocation policies.
instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]

-- | Uniform choice over all disk templates.
instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]
276

    
277
-- * Actual tests

-- ** Utils tests

-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat`
            (\l -> l /= [""] && all (notElem ',') l)) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin s =
    s == Utils.commaJoin (Utils.sepSplit ',' s)
290

    
291
-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key will be returned with the default
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- a found key will be returned as is, not with default (hence the
    -- deliberately different default @def_value+1@ below)
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
         random_key (def_value+1) == Just def_value
        where _types = def_value :: Integer
300

    
301
-- | Test list for the Utils module.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  ]
307

    
308
-- ** PeerMap tests

-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    addOne normalized == addOne (addOne normalized)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          addOne = PeerMap.add key em
          normalized = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    dropOne normalized == dropOne (dropOne normalized)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          dropOne = PeerMap.remove key
          normalized = PeerMap.accumArray const pmap
324

    
325
-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
    0 == PeerMap.find key (PeerMap.remove key normalized)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          normalized = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
    em == PeerMap.find key (PeerMap.add key em normalized)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          normalized = PeerMap.accumArray const pmap
337

    
338
-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem normalized ==
        (if null normalized then 0 else maximum (map snd normalized))
    where _types = pmap::PeerMap.PeerMap
          normalized = PeerMap.accumArray const pmap
344

    
345
-- | List of tests for the PeerMap module.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
353

    
354
-- ** Container tests

-- | addTwo is commutative in its two key/value pairs and idempotent.
prop_Container_addTwo cdata i1 i2 =
    insertPair i1 i2 base == insertPair i2 i1 base &&
       insertPair i1 i2 base == insertPair i1 i2 (insertPair i1 i2 base)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          base = foldl (\c x -> Container.add x x c) Container.empty cdata
          insertPair x1 x2 = Container.addTwo x1 x1 x2 x2
363

    
364
-- | In a one-node cluster, nameOf on the node's index returns its name.
prop_Container_nameOf node =
  case Container.elems nl of
    (fnode:_) -> Container.nameOf nl (Node.idx fnode) == Node.name fnode
    [] -> False
  where nl = makeSmallCluster node 1
368

    
369
-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  -- precondition: all generated names and aliases are distinct, and
  -- 'othername' is not among them (otherwise lookups are ambiguous)
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- re-label each cluster node with a generated (name, alias) pair
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing
390

    
391
-- | List of tests for the Container module.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
396

    
397
-- ** Instance tests

-- Simple instance tests, we only have setter/getters

-- | A freshly created instance has its alias equal to its name.
-- NOTE(review): the name looks like a typo ("creat" vs "create");
-- kept as-is since testInstance references it.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst
403

    
404
-- | setIdx stores exactly the given index.
prop_Instance_setIdx inst idx =
    idx == Instance.idx (Instance.setIdx inst idx)
    where _types = (inst::Instance.Instance, idx::Types.Idx)

-- | setName updates both name and alias.
prop_Instance_setName inst name =
    name == Instance.name renamed &&
    name == Instance.alias renamed
    where _types = (inst::Instance.Instance, name::String)
          renamed = Instance.setName inst name

-- | setAlias updates the alias but leaves the name alone.
prop_Instance_setAlias inst name =
    Instance.name aliased == Instance.name inst &&
    name == Instance.alias aliased
    where _types = (inst::Instance.Instance, name::String)
          aliased = Instance.setAlias inst name

-- | setPri stores exactly the given primary node index.
prop_Instance_setPri inst pdx =
    pdx == Instance.pNode (Instance.setPri inst pdx)
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

-- | setSec stores exactly the given secondary node index.
prop_Instance_setSec inst sdx =
    sdx == Instance.sNode (Instance.setSec inst sdx)
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

-- | setBoth stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode updated == pdx && Instance.sNode updated == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          updated = Instance.setBoth inst pdx sdx
432

    
433
-- | Instances whose textual run state is a known running state must
-- report 'running'.
prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            ((`elem` Instance.runningStates) . Instance.runSt))
    Instance.running

-- | Instances whose textual run state is unknown must not report
-- 'running'.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st
443

    
444
-- | Shrinking memory on a big-enough instance removes exactly one
-- memory unit.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

-- | Shrinking memory below one remaining unit must fail.
prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem}
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

-- | Shrinking CPUs on a big-enough instance removes exactly one CPU
-- unit.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

-- | Shrinking CPUs below one remaining unit must fail.
prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

-- | Shrinking disk on a big-enough instance removes exactly one disk
-- unit.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

-- | Shrinking disk below one remaining unit must fail.
prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk
479

    
480
-- | setMovable stores exactly the given flag.
prop_Instance_setMovable inst m =
    m == Instance.movable (Instance.setMovable inst m)
483

    
484
-- | List of tests for the Instance module.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
502

    
503
-- ** Text backend tests

-- Instance text loader tests

-- | A serialized instance round-trips through the text loader with
-- all fields intact; a malformed line (9 fields, secondary equal to
-- primary) must be rejected.
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal dt =
    pnode /= snode && pdx /= sdx ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- an empty secondary means a single-node instance
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        sdt = Types.dtToString dt
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, sdt, tags]
        -- deliberately broken input: only 9 fields and pnode repeated
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , snode::String
                 , autobal::Bool)
    in
      case inst of
        Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
                         False
        Types.Ok (_, i) -> printTestCase ("Mismatch in some field while\
                                          \ loading the instance") $
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else sdx) &&
            Instance.autoBalance i == autobal &&
            Types.isBad fail1

    
547
-- | Any field list whose length differs from the expected 10 must be
-- rejected with the standard error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 10 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> printTestCase "Managed to load instance from invalid\
                                  \ data" False
      Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
                       "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn
555

    
556
-- | A node serialized from its individual fields loads back
-- correctly; any negative (i.e. "?"-serialized) field or an explicit
-- offline flag results in an offline node.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

    
586
-- | Field lists of the wrong length must fail to load.
-- NOTE(review): prop_Text_Load_Node above feeds 'Text.loadNode' 9
-- fields (including the group uuid), so the @/= 8@ guard here looks
-- stale -- confirm the expected field count against Text.loadNode.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
588

    
589
-- | Serializing then loading a node is the identity (modulo the
-- failN1/offline fields forced below).
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }
595

    
596
-- | List of tests for the Text backend.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
603

    
604
-- ** Node tests

-- | setAlias updates the alias but leaves the name alone.
prop_Node_setAlias node name =
    Node.name aliased == Node.name node &&
    name == Node.alias aliased
    where _types = (node::Node.Node, name::String)
          aliased = Node.setAlias node name

-- | setOffline stores exactly the given flag.
prop_Node_setOffline node status =
    status == Node.offline (Node.setOffline node status)

-- | setXmem stores exactly the given value.
prop_Node_setXmem node xm =
    xm == Node.xMem (Node.setXmem node xm)

-- | setMcpu stores exactly the given value.
prop_Node_setMcpu node mc =
    mc == Node.mCpu (Node.setMcpu node mc)
623

    
624
-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          -- shrink everything to fit, then restore only the (too
          -- large) original memory so memory is the sole failure
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Same as addPriFM, but with disk as the sole oversized resource.
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | Same idea with CPUs: requesting more than available must fail
-- with FailCPU.
prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
        case Node.addPri node inst'' of
          Types.OpFail Types.FailCPU -> True
          _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
654

    
655
-- | Check that an instance add with too high memory or disk will be
-- rejected (secondary-node variant).
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
663

    
664
-- | Checks for memory reservation changes.
--
-- Adding an auto-balanced secondary must grow rMem by the instance's
-- memory; a non-auto-balanced one must leave rMem untouched; removing
-- either restores the original value.
prop_Node_rMem inst =
    forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.autoBalance = False }
        -- now we have the two instances, identical except the
        -- autoBalance attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             printTestCase "Consistency checks failed" $
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)
         x -> printTestCase ("Failed to add/remove instances: " ++ show x)
              False
695

    
696
-- | Check mdsk setting: after setting the minimum-disk ratio, the
-- derived low-disk and available-disk figures stay within bounds.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx
707

    
708
-- Check tag maps

-- | Adding then deleting the same tags leaves the map unchanged.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags emptyMap tags) tags == emptyMap
    where emptyMap = Data.Map.empty

-- | Every already-present tag must be rejected on re-add.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\tag -> Node.rejectAddTags tagMap [tag]) tags
    where tagMap = Node.addTags Data.Map.empty tags
717

    
718
-- | Every default display field has a known header and renders a
-- known value for any node.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField
722

    
723

    
724
-- | computeGroups partitions the nodes: sizes add up, every node is
-- in its own group, group uuids are unique, and a non-empty input
-- yields a non-empty grouping.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))
731

    
732
-- | List of tests for the Node module.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
748

    
749

    
750
-- ** Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-12
765

    
766
-- | Check that cluster stats are sane: allocatable disk is
-- non-negative and never exceeds free disk.
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats
777

    
778
-- | Check that one instance is allocated correctly, without
779
-- rebalances needed.
780
prop_ClusterAlloc_sane node inst =
781
    forAll (choose (5, 20)) $ \count ->
782
    not (Node.offline node)
783
            && not (Node.failN1 node)
784
            && Node.availDisk node > 0
785
            && Node.availMem node > 0
786
            ==>
787
    let nl = makeSmallCluster node count
788
        il = Container.empty
789
        inst' = setInstanceSmallerThanNode node inst
790
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
791
       Cluster.tryAlloc nl il inst' of
792
         Types.Bad _ -> False
793
         Types.Ok as ->
794
             case Cluster.asSolutions as of
795
               [] -> False
796
               (xnl, xi, _, cv):[] ->
797
                   let il' = Container.add (Instance.idx xi) xi il
798
                       tbl = Cluster.Table xnl il' cv []
799
                   in not (canBalance tbl True True False)
800
               _ -> False
801

    
802
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        result = Cluster.genAllocNodes defGroupList nl rqnodes True >>=
                 \anodes -> Cluster.tieredAlloc nl il inst anodes [] []
    in case result of
         Types.Bad _ -> False
         -- some instances must have been placed, and the bookkeeping
         -- lists must stay in sync with the instance container
         Types.Ok (_, _, il', ixes, cstats) ->
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats
821

    
822
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        -- shrink the instance so it is guaranteed to fit on the node
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               -- exactly one solution is expected; evacuating the
               -- instance away from its secondary must then succeed
               [(xnl, xi, _, _)] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in isJust (Cluster.tryEvac xnl il' [Instance.idx xi] [sdx])
               _ -> False
846

    
847
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        -- hold back the highest-indexed node and allocate on the rest
        (spare, nl') = IntMap.deleteFindMax nl
        itempl = createInstance Types.unitMem Types.unitDsk Types.unitCpu
        result = Cluster.genAllocNodes defGroupList nl' 2 True >>=
                 \anodes ->
                     Cluster.iterateAlloc nl' Container.empty itempl
                     anodes [] []
    in case result of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
             -- re-adding the spare node must make a rebalance possible
             let ynl = Container.add (Node.idx spare) spare xnl
                 tbl = Cluster.Table ynl il' (Cluster.compCV ynl) []
             in canBalance tbl True True False
866

    
867
-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [na, nb, nc] = Container.elems nl
      -- move the third node into a different node group
      nc' = nc { Node.group = 1 }
      nl' = Container.add (Node.idx nc') nc' nl
      sameGroup = Instance.setBoth inst (Node.idx na) (Node.idx nb)
      noSec = Instance.setBoth inst (Node.idx na) Node.noSecondary
      split = Instance.setBoth inst (Node.idx na) (Node.idx nc)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  -- only the instance spanning two groups may be flagged as split
  in null (ccheck [(0, sameGroup)]) &&
     null (ccheck [(0, noSec)]) &&
     (not . null $ ccheck [(0, split)])
880

    
881
-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      assign (ns, is) _ = assignInstance ns is inst 0 1
      (nl', il') = foldl assign (nl, Container.empty) [1..icnt]
      groups = Cluster.splitCluster nl' il'
      homogeneous (guuid, (gnl, _)) =
          all ((== guuid) . Node.group) (Container.elems gnl)
  -- no instance may be lost, and each sub-cluster must contain
  -- only nodes belonging to its own group
  in sum (map (Container.size . snd . snd) groups) == icnt &&
     all homogeneous groups
891

    
892
-- | The list of cluster test cases, one runner per property above.
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]
902

    
903
-- ** OpCodes tests
904

    
905
-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    -- report the failure cause, consistent with the Types tests below
    J.Error e -> printTestCase ("failed to deserialise: " ++ e) False
    J.Ok op' -> printTestCase ("invalid deserialisation " ++ show op') $
                op == op'
  where _types = op::OpCodes.OpCode
911

    
912
-- | The list of opcode test cases.
testOpCodes =
  [ run prop_OpCodes_serialization
  ]
915

    
916
-- ** Jobs tests
917

    
918
-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Ok decoded -> decoded == os
    J.Error _ -> False
  where _types = os::Jobs.OpStatus
924

    
925
-- | Check that job status serialization round-trips to the same value.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Ok decoded -> decoded == js
    J.Error _ -> False
  where _types = js::Jobs.JobStatus
930

    
931
-- | The list of job\/opcode status test cases.
testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]
935

    
936
-- ** Loader tests
937

    
938
-- | Checks that node lookup behaves exactly like a plain map lookup.
prop_Loader_lookupNode ktn inst node =
  let nl = Data.Map.fromList ktn
  in Loader.lookupNode nl inst node == Data.Map.lookup node nl
941

    
942
-- | Checks that instance lookup behaves exactly like a plain map lookup.
prop_Loader_lookupInstance kti inst =
  let il = Data.Map.fromList kti
  in Loader.lookupInstance il inst == Data.Map.lookup inst il
945

    
946
-- | Checks that assigning indices preserves the item count and, for a
-- non-empty input, produces contiguous indices starting at zero.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  -- the maximum check only makes sense on a non-empty container
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
953

    
954
-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Loader_mergeData ns =
  let na = Container.fromList [(Node.idx n, n) | n <- ns]
  in case Loader.mergeData [] [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      -- with no instance data merged in, the instance container must
      -- be empty and no node may record any primary instance
      all (null . Node.pList) (Container.elems nl) &&
      null (Container.elems il)
966

    
967
-- | The list of loader test cases.
testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]
973

    
974
-- ** Types tests
975

    
976
-- | Checks that allocation policies survive a string round-trip.
prop_AllocPolicy_serialisation apol =
    case Types.apolFromString (Types.apolToString apol) of
      Types.Bad msg -> printTestCase ("failed to deserialise: " ++ msg) False
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
                    p == apol
981

    
982
-- | Checks that disk templates survive a string round-trip.
prop_DiskTemplate_serialisation dt =
    case Types.dtFromString (Types.dtToString dt) of
      Types.Bad msg -> printTestCase ("failed to deserialise: " ++ msg) False
      Types.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
                    p == dt
987

    
988
-- | The list of Types test cases.
testTypes =
    [ run prop_AllocPolicy_serialisation
    , run prop_DiskTemplate_serialisation
    ]