Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / QC.hs @ 3c002a13

History | View | Annotate | Download (34.4 kB)

1
{-| Unittests for ganeti-htools.
2

    
3
-}
4

    
5
{-
6

    
7
Copyright (C) 2009, 2010, 2011 Google Inc.
8

    
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
13

    
14
This program is distributed in the hope that it will be useful, but
15
WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
General Public License for more details.
18

    
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22
02110-1301, USA.
23

    
24
-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    , testTypes
38
    ) where
39

    
40
import Test.QuickCheck
41
import Data.List (findIndex, intercalate, nub, isPrefixOf)
42
import Data.Maybe
43
import Control.Monad
44
import qualified Text.JSON as J
45
import qualified Data.Map
46
import qualified Data.IntMap as IntMap
47
import qualified Ganeti.OpCodes as OpCodes
48
import qualified Ganeti.Jobs as Jobs
49
import qualified Ganeti.Luxi
50
import qualified Ganeti.HTools.CLI as CLI
51
import qualified Ganeti.HTools.Cluster as Cluster
52
import qualified Ganeti.HTools.Container as Container
53
import qualified Ganeti.HTools.ExtLoader
54
import qualified Ganeti.HTools.IAlloc as IAlloc
55
import qualified Ganeti.HTools.Instance as Instance
56
import qualified Ganeti.HTools.Loader as Loader
57
import qualified Ganeti.HTools.Luxi
58
import qualified Ganeti.HTools.Node as Node
59
import qualified Ganeti.HTools.Group as Group
60
import qualified Ganeti.HTools.PeerMap as PeerMap
61
import qualified Ganeti.HTools.Rapi
62
import qualified Ganeti.HTools.Simu
63
import qualified Ganeti.HTools.Text as Text
64
import qualified Ganeti.HTools.Types as Types
65
import qualified Ganeti.HTools.Utils as Utils
66
import qualified Ganeti.HTools.Version
67
import qualified Ganeti.Constants as C
68

    
69
-- | Run a QuickCheck property with the supplied arguments, returning
-- the raw result.
run :: Testable prop => prop -> Args -> IO Result
run p args = quickCheckWithResult args p
71

    
72
-- * Constants
73

    
74
-- | Maximum memory (1TiB, somewhat random value).
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value).
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024
85

    
86
-- | The single node group used throughout these tests.
defGroup :: Group.Group
defGroup = Group.setIdx
               (Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred)
               0

-- | Singleton group list containing only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Map from the default group's UUID to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
96

    
97
-- * Helper functions
98

    
99
-- | Simple checker for whether an OpResult is a failure.
isFailure :: Types.OpResult a -> Bool
isFailure res = case res of
                  Types.OpFail _ -> True
                  _              -> False
103

    
104
-- | Update an instance to be smaller than a node.
--
-- Sets the instance's memory, disk and vcpus to half of the node's
-- respective available amounts, guaranteeing it fits on the node.
setInstanceSmallerThanNode :: Node.Node -> Instance.Instance
                           -> Instance.Instance
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }
110

    
111
-- | Create an instance given its spec (memory, disk, vcpus); the
-- remaining fields get fixed dummy values (running, no tags,
-- auto-balanced, no nodes assigned).
createInstance :: Int -> Int -> Int -> Instance.Instance
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
114

    
115
-- | Create a small cluster by repeating a node spec 'count' times.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let peered = Node.buildPeers node Container.empty
        named = [ (Node.name n, n) | n <- replicate count peered ]
        (_, nodelist) = Loader.assignIndices named
    in nodelist
122

    
123
-- | Checks if a node is "big" enough, i.e. has more than 'size'
-- allocation units free in each of disk, memory and CPU.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]
128

    
129
-- | Whether the given cluster table admits at least one balancing
-- move under the given disk-move/instance-move/evacuation flags.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac =
    isJust (Cluster.tryBalance tbl dm im evac 0 0)
131

    
132
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let primary = Container.find pdx nl
      secondary = Container.find sdx nl
      -- next free instance index (0 for an empty container)
      newidx = if Container.null il
               then 0
               else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = newidx
                   , Instance.pNode = pdx
                   , Instance.sNode = sdx }
      primary' = Node.setPri primary inst'
      secondary' = Node.setSec secondary inst'
      nl' = Container.addTwo pdx primary' sdx secondary' nl
      il' = Container.add newidx inst' il
  in (nl', il')
150

    
151
-- * Arbitrary instances
152

    
153
-- | Defines a DNS name.
newtype DNSChar = DNSChar { dnsGetChar :: Char }

-- | DNS characters are lowercase alphanumerics plus '_' and '-'.
instance Arbitrary DNSChar where
    arbitrary =
      liftM DNSChar $ elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")

-- | Generate a single DNS label of 1 to 64 characters.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  cs <- vector len :: Gen [DNSChar]
  return (map dnsGetChar cs)

-- | Generate a fully-qualified name: one label followed by 1 to 4
-- further dot-separated components.
getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps :: Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')
175

    
176
-- | Generate a random instance: FQDN name, bounded resources, and a
-- run state drawn from the known states plus two bogus ones.
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements run_states
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn
      where run_states = [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"
                         ]
194

    
195
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let scale mm = (mm * Types.unitMem, mm * Types.unitDsk, mm * Types.unitCpu)
      (base_mem, base_dsk, base_cpu) =
          maybe (0, 0, 0) scale min_multiplier
      (top_mem, top_dsk, top_cpu) =
          maybe (maxMem, maxDsk, maxCpu) scale max_multiplier
  name  <- getFQDN
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers n Container.empty
224

    
225
-- | A random node is simply an unconstrained 'genNode'.
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing

-- | Any of the four replace-disks modes is equally likely.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

-- | Generate one of the four supported opcodes with arbitrary
-- payloads.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    case op_id of
      "OP_TEST_DELAY" ->
        liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
      "OP_INSTANCE_REPLACE_DISKS" ->
        liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
               arbitrary arbitrary arbitrary
      "OP_INSTANCE_FAILOVER" ->
        liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
      "OP_INSTANCE_MIGRATE" ->
        liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
               arbitrary
      _ -> fail "Wrong opcode"
256

    
257
-- | Status types are bounded enumerations, so a uniform pick over all
-- constructors is a valid generator.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- | A ratio drawn from the closed interval [0, 1].
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = liftM SmallRatio (choose (0, 1))

instance Arbitrary Types.AllocPolicy where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Types.DiskTemplate where
  arbitrary = elements [minBound..maxBound]
274

    
275
-- * Actual tests
276

    
277
-- ** Utils tests
278

    
279
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat`
            (\l -> l /= [""] && all (notElem ',') l)) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- | Split and join should always be idempotent.
prop_Utils_commaSplitJoin s =
    Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | fromObjWithDefault, tested via the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
    missing && present
    where _types = def_value :: Integer
          -- a missing key is replaced by the default
          missing = Utils.fromObjWithDefault [] random_key def_value
                    == Just def_value
          -- a present key is returned as-is, ignoring the default
          present = Utils.fromObjWithDefault
                      [(random_key, J.showJSON def_value)]
                      random_key (def_value + 1)
                    == Just def_value
298

    
299
-- | Test list for the Utils module.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  ]
305

    
306
-- ** PeerMap tests
307

    
308
-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    addOnce normalized == addOnce (addOnce normalized)
    where _types = (pmap :: PeerMap.PeerMap,
                    key :: PeerMap.Key, em :: PeerMap.Elem)
          addOnce = PeerMap.add key em
          normalized = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    removeOnce normalized == removeOnce (removeOnce normalized)
    where _types = (pmap :: PeerMap.PeerMap, key :: PeerMap.Key)
          removeOnce = PeerMap.remove key
          normalized = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key normalized) == 0
    where _types = (pmap :: PeerMap.PeerMap, key :: PeerMap.Key)
          normalized = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em normalized) == em
    where _types = (pmap :: PeerMap.PeerMap,
                    key :: PeerMap.Key, em :: PeerMap.Elem)
          normalized = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem normalized == expected
    where _types = pmap :: PeerMap.PeerMap
          normalized = PeerMap.accumArray const pmap
          expected = if null normalized
                     then 0
                     else maximum (map snd normalized)
342

    
343
-- | List of tests for the PeerMap module.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
351

    
352
-- ** Container tests
353

    
354
-- | addTwo is commutative in its two pairs and idempotent.
prop_Container_addTwo cdata i1 i2 =
    commutative && idempotent
    where _types = (cdata :: [Int], i1 :: Int, i2 :: Int)
          base = foldl (\c x -> Container.add x x c) Container.empty cdata
          add2 x1 x2 = Container.addTwo x1 x1 x2 x2
          commutative = add2 i1 i2 base == add2 i2 i1 base
          idempotent = add2 i1 i2 base == add2 i1 i2 (add2 i1 i2 base)

-- | nameOf returns the name of the node stored at a given index.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      stored = head (Container.elems nl)
  in Container.nameOf nl (Node.idx stored) == Node.name stored

-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  othername `notElem` (map fst names ++ map snd names) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- pair each generated (name, alias) with a node and rename it
      renamed = [ (Node.idx nn, nn { Node.name = nm, Node.alias = al })
                | ((nm, al), nn) <- zip names nodes ]
      nl' = Container.fromList renamed
      target = snd (renamed !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing
388

    
389
-- | Test list for the Container module.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
394

    
395
-- ** Instance tests
396

    
397
-- Simple instance tests, we only have setter/getters
398

    
399
-- | A freshly created instance has its alias equal to its name.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

-- | setIdx stores the given index.
prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst :: Instance.Instance, idx :: Types.Idx)

-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst :: Instance.Instance, name :: String)
          newinst = Instance.setName inst name

-- | setAlias updates only the alias, leaving the name intact.
prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst :: Instance.Instance, name :: String)
          newinst = Instance.setAlias inst name

-- | setPri stores the primary node index.
prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst :: Instance.Instance, pdx :: Types.Ndx)

-- | setSec stores the secondary node index.
prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst :: Instance.Instance, sdx :: Types.Ndx)

-- | setBoth stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst :: Instance.Instance,
                    pdx :: Types.Ndx, sdx :: Types.Ndx)
          si = Instance.setBoth inst pdx sdx

-- | Instances whose textual run state is a running state must report
-- as running.
prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            ((`elem` Instance.runningStates) . Instance.runSt))
    Instance.running

-- | Instances whose textual run state is not a running state must not
-- report as running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st
441

    
442
-- | Shrinking memory on a sufficiently large instance removes exactly
-- one memory unit.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

-- | Shrinking memory when below two units must fail.
prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

-- | Shrinking CPU on a sufficiently large instance removes exactly
-- one CPU unit.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

-- | Shrinking CPU when below two units must fail.
prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

-- | Shrinking disk on a sufficiently large instance removes exactly
-- one disk unit.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

-- | Shrinking disk when below two units must fail.
prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk

-- | setMovable stores the movable flag.
prop_Instance_setMovable inst m =
    Instance.movable (Instance.setMovable inst m) == m
481

    
482
-- | Test list for the Instance module.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
500

    
501
-- ** Text backend tests
502

    
503
-- Instance text loader tests
504

    
505
-- | A consistent instance line must parse back into the same
-- instance, while the same line with primary == secondary must be
-- rejected.
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal =
    pnode /= snode && pdx /= sdx ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- the secondary node only enters the lookup table when
        -- non-empty
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        -- loading with an identical primary/secondary pair must fail
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name :: String, mem :: Int, dsk :: Int
                 , vcpus :: Int, status :: String
                 , snode :: String
                 , autobal :: Bool)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else sdx) &&
            Instance.autoBalance i == autobal &&
            isNothing fail1
541

    
542
-- | Instance lines with the wrong field count must be rejected with
-- the expected error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> False
      Types.Bad msg -> "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn
548

    
549
-- | A node line round-trips through loadNode; negative resource
-- values serialize as "?" and (like an explicit offline flag) must
-- yield an offline node.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v | v < 0 = "?"
               | otherwise = show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

    
579
-- | Node lines with the wrong field count must fail to load.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing (Text.loadNode Data.Map.empty fields)
581

    
582
-- | Serializing and re-loading a node is the identity, modulo the
-- failN1/offline flags which loadNode sets to fixed defaults.
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc .
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }
588

    
589
-- | Test list for the Text backend.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
596

    
597
-- ** Node tests
598

    
599
-- | setAlias updates only the alias, not the name.
prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node :: Node.Node, name :: String)
          newnode = Node.setAlias node name

-- | setOffline stores the offline flag.
prop_Node_setOffline node status =
    Node.offline (Node.setOffline node status) == status

-- | setXmem stores the excluded-memory value.
prop_Node_setXmem node xm =
    Node.xMem (Node.setXmem node xm) == xm

-- | setMcpu stores the max-cpu ratio.
prop_Node_setMcpu node mc =
    Node.mCpu (Node.setMcpu node mc) == mc

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addPriFM node inst =
    Instance.mem inst >= Node.fMem node && not (Node.failN1 node) ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailMem -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          -- shrink everything but memory, which keeps its (too big)
          -- original value
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Adding an instance with too much disk must fail with FailDisk.
prop_Node_addPriFD node inst =
    Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailDisk -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | Adding an instance with more vcpus than available must fail with
-- FailCPU.
prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailCPU -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }

-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node :: Node.Node, inst :: Instance.Instance,
                        pdx :: Int)
656

    
657
-- | Checks for memory reservation changes.
prop_Node_rMem inst =
    forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.autoBalance = False }
        -- the two instances are identical except for the autoBalance
        -- attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
        node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             printTestCase "Consistency checks failed" $
             -- an auto-balanced secondary grows rMem by its memory, a
             -- non-balanced one leaves it untouched, and removal
             -- restores the original value in both cases
             Node.rMem a_ab > orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             inst_idx `notElem` Node.sList d_ab
         x -> printTestCase ("Failed to add/remove instances: " ++ show x)
              False
688

    
689
-- | Check mdsk setting.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == ratio
    where _types = (node :: Node.Node, mx :: SmallRatio)
          SmallRatio ratio = mx
          node' = Node.setMdsk node ratio

-- Check tag maps
-- | Adding then removing the same tags leaves the empty map intact.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

-- | Tags already present in the map must be rejected on re-add.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

-- | Every default field has a known header and a printable value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField

-- | computeGroups partitions the nodes by group without losing any,
-- with unique group UUIDs, and is non-empty for non-empty input.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))
724

    
725
-- | Test list for the Node module.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
741

    
742

    
743
-- ** Cluster tests
744

    
745
-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let peered = Node.buildPeers node Container.empty
        indexed = zip [1..] (replicate count peered)
                  :: [(Types.Ndx, Node.Node)]
        nl = Container.fromList indexed
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-12

-- | Check that cluster stats are sane.
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let peered = Node.buildPeers node Container.empty
        indexed = zip [1..] (replicate count peered)
                  :: [(Types.Ndx, Node.Node)]
        nl = Container.fromList indexed
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats
770

    
771
-- | Check that one instance is allocated correctly, without
772
-- rebalances needed.
773
prop_ClusterAlloc_sane node inst =
774
    forAll (choose (5, 20)) $ \count ->
775
    not (Node.offline node)
776
            && not (Node.failN1 node)
777
            && Node.availDisk node > 0
778
            && Node.availMem node > 0
779
            ==>
780
    let nl = makeSmallCluster node count
781
        il = Container.empty
782
        inst' = setInstanceSmallerThanNode node inst
783
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
784
       Cluster.tryAlloc nl il inst' of
785
         Types.Bad _ -> False
786
         Types.Ok as ->
787
             case Cluster.asSolutions as of
788
               [] -> False
789
               (xnl, xi, _, cv):[] ->
790
                   let il' = Container.add (Instance.idx xi) xi il
791
                       tbl = Cluster.Table xnl il' cv []
792
                   in not (canBalance tbl True True False)
793
               _ -> False
794

    
795
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node) && not (Node.failN1 node) &&
    isNodeBig node 4 ==>
    let nl = makeSmallCluster node count
        result = Cluster.genAllocNodes defGroupList nl rqnodes True >>=
                 \anodes -> Cluster.tieredAlloc nl Container.empty
                            inst anodes [] []
    in case result of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) ->
             -- at least one instance placed, and the bookkeeping
             -- lists/containers stay in sync
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats

    
815
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) &&
    isNodeBig node 4 ==>
    let nl = makeSmallCluster node count
        shrunk = setInstanceSmallerThanNode node inst
        alloc = Cluster.genAllocNodes defGroupList nl 2 True >>=
                Cluster.tryAlloc nl Container.empty shrunk
    in case alloc of
         Types.Bad _ -> False
         Types.Ok sol ->
             case Cluster.asSolutions sol of
               -- a single solution is expected; evacuating the
               -- instance's secondary node must then succeed
               [(xnl, placed, _, _)] ->
                   let idx = Instance.idx placed
                       il' = Container.add idx placed Container.empty
                   in isJust (Cluster.tryEvac xnl il' [idx]
                              [Instance.sNode placed])
               _ -> False

    
840
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    -- keep one node aside, fill the rest, then re-add the spare
    let (spare, smaller) = IntMap.deleteFindMax (makeSmallCluster node count)
        templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
        alloc = Cluster.genAllocNodes defGroupList smaller 2 True >>=
                \anodes -> Cluster.iterateAlloc smaller Container.empty
                           templ anodes [] []
    in case alloc of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
             let grown = Container.add (Node.idx spare) spare xnl
                 tbl = Cluster.Table grown il' (Cluster.compCV grown) []
             in canBalance tbl True True False

    
860
-- | Checks consistency: instances spanning two node groups are
-- reported as split, same-group or primary-only placements are not.
prop_ClusterCheckConsistency node inst =
  null (splitOf [(0, sameGroup)]) &&
  null (splitOf [(0, primOnly)]) &&
  not (null (splitOf [(0, crossGroup)]))
  where nl = makeSmallCluster node 3
        [n1, n2, n3] = Container.elems nl
        -- move the third node into a different group
        n3' = n3 { Node.group = 1 }
        nl' = Container.add (Node.idx n3') n3' nl
        sameGroup = Instance.setBoth inst (Node.idx n1) (Node.idx n2)
        primOnly = Instance.setBoth inst (Node.idx n1) Node.noSecondary
        crossGroup = Instance.setBoth inst (Node.idx n1) (Node.idx n3)
        splitOf = Cluster.findSplitInstances nl' . Container.fromList

    
874
-- | For now, we only test that we don't lose instances during the split.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let base = makeSmallCluster node 2
      place (ns, is) _ = assignInstance ns is inst 0 1
      (nl', il') = foldl place (base, Container.empty) [1..icnt]
      groups = Cluster.splitCluster nl' il'
      -- every node in a split group must carry that group's id
      sameGroup (guuid, (gnl, _)) =
          all ((== guuid) . Node.group) (Container.elems gnl)
  in sum (map (Container.size . snd . snd) groups) == icnt &&
     all sameGroup groups

    
885
-- | The cluster property test suite.
testCluster =
    [ run prop_Score_Zero, run prop_CStats_sane
    , run prop_ClusterAlloc_sane, run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac, run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency, run prop_ClusterSplitCluster
    ]
895

    
896
-- ** OpCodes tests
897

    
898
-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Ok decoded -> decoded == op
    J.Error _ -> False
  where _types = op::OpCodes.OpCode
904

    
905
-- | The opcodes test suite.
testOpCodes =
  [ run prop_OpCodes_serialization ]
908

    
909
-- ** Jobs tests
910

    
911
-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Ok decoded -> decoded == os
    J.Error _ -> False
  where _types = os::Jobs.OpStatus
917

    
918
-- | Check that job status serialization is idempotent.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Ok decoded -> decoded == js
    J.Error _ -> False
  where _types = js::Jobs.JobStatus
923

    
924
-- | The jobs test suite.
testJobs =
  [ run prop_OpStatus_serialization, run prop_JobStatus_serialization ]
928

    
929
-- ** Loader tests
930

    
931
-- | Check that node lookup behaves like a plain map lookup.
prop_Loader_lookupNode ktn inst node =
  let nodemap = Data.Map.fromList ktn
  in Loader.lookupNode nodemap inst node == Data.Map.lookup node nodemap
934

    
935
-- | Check that instance lookup behaves like a plain map lookup.
prop_Loader_lookupInstance kti inst =
  let instmap = Data.Map.fromList kti
  in Loader.lookupInstance instmap inst == Data.Map.lookup inst instmap
938

    
939
-- | Check that index assignment is complete: every node gets an
-- entry in the name association map, the reverse container has the
-- same size, and the generated indices are contiguous from zero.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  -- on a non-empty input the highest index must be length - 1
  -- (idiom fix: `if c then x else True` simplified to `not c || x`)
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
946

    
947
-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
prop_Loader_mergeData ns =
  case Loader.mergeData [] [] [] [] cdata of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
        -- no node may list a primary instance, and no instances
        -- may appear out of thin air
        all (null . Node.pList) (Container.elems nl) &&
        null (Container.elems il)
  where na = Container.fromList [(Node.idx n, n) | n <- ns]
        cdata = Loader.emptyCluster { Loader.cdNodes = na }
959

    
960
-- | The loader test suite.
testLoader =
  [ run prop_Loader_lookupNode, run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices, run prop_Loader_mergeData
  ]
966

    
967
-- ** Types tests
968

    
969
-- | Check that allocation policy serialisation round-trips.
prop_AllocPolicy_serialisation apol =
    case Types.apolFromString (Types.apolToString apol) of
      Types.Bad msg -> printTestCase ("failed to deserialise: " ++ msg) False
      Types.Ok value ->
          printTestCase ("invalid deserialisation " ++ show value) $
          value == apol
974

    
975
-- | Check that disk template serialisation round-trips.
prop_DiskTemplate_serialisation dt =
    case Types.dtFromString (Types.dtToString dt) of
      Types.Bad msg -> printTestCase ("failed to deserialise: " ++ msg) False
      Types.Ok value ->
          printTestCase ("invalid deserialisation " ++ show value) $
          value == dt
980

    
981
-- | The types test suite.
testTypes =
    [ run prop_AllocPolicy_serialisation
    , run prop_DiskTemplate_serialisation ]