Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / QC.hs @ 0e09422b

History | View | Annotate | Download (33.5 kB)

{-| Unittests for ganeti-htools.

-}

{-

Copyright (C) 2009, 2010, 2011 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    ) where
38

    
39
import Test.QuickCheck
40
import Data.List (findIndex, intercalate, nub, isPrefixOf)
41
import Data.Maybe
42
import Control.Monad
43
import qualified Text.JSON as J
44
import qualified Data.Map
45
import qualified Data.IntMap as IntMap
46
import qualified Ganeti.OpCodes as OpCodes
47
import qualified Ganeti.Jobs as Jobs
48
import qualified Ganeti.Luxi
49
import qualified Ganeti.HTools.CLI as CLI
50
import qualified Ganeti.HTools.Cluster as Cluster
51
import qualified Ganeti.HTools.Container as Container
52
import qualified Ganeti.HTools.ExtLoader
53
import qualified Ganeti.HTools.IAlloc as IAlloc
54
import qualified Ganeti.HTools.Instance as Instance
55
import qualified Ganeti.HTools.Loader as Loader
56
import qualified Ganeti.HTools.Luxi
57
import qualified Ganeti.HTools.Node as Node
58
import qualified Ganeti.HTools.Group as Group
59
import qualified Ganeti.HTools.PeerMap as PeerMap
60
import qualified Ganeti.HTools.Rapi
61
import qualified Ganeti.HTools.Simu
62
import qualified Ganeti.HTools.Text as Text
63
import qualified Ganeti.HTools.Types as Types
64
import qualified Ganeti.HTools.Utils as Utils
65
import qualified Ganeti.HTools.Version
66
import qualified Ganeti.Constants as C
67

    
68
-- | Runs a QuickCheck property, returning the detailed result.
run :: Testable prop => prop -> Args -> IO Result
run prop args = quickCheckWithResult args prop
70

    
71
-- * Constants
72

    
73
-- | Maximum memory in MiB (i.e. 1TiB, a somewhat arbitrary bound).
maxMem :: Int
maxMem = 1024 * 1024
76

    
77
-- | Maximum disk in MiB (i.e. 8TiB, a somewhat arbitrary bound).
maxDsk :: Int
maxDsk = 8 * 1024 * 1024
80

    
81
-- | Maximum CPU count (1024, a somewhat arbitrary bound).
maxCpu :: Int
maxCpu = 1024
84

    
85
-- | The default node group used by the tests, with index 0.
defGroup :: Group.Group
defGroup =
    Group.setIdx (Group.create "default" Utils.defaultGroupID
                  Types.AllocPreferred) 0
89

    
90
-- | A group container holding only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]
92

    
93
-- | Maps the default group's uuid to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
95

    
96
-- * Helper functions
97

    
98
-- | Simple checker for whether an 'OpResult' is a failure.
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _                = False
102

    
103
-- | Shrinks an instance so it is guaranteed to fit on the given
-- node, by claiming only half of each available resource.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem   = Node.availMem node `div` 2
         , Instance.dsk   = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }
109

    
110
-- | Creates an anonymous running instance with the given resources
-- and no node assignments yet.
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
113

    
114
-- | Create a small cluster by replicating a single node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let node'  = Node.buildPeers node Container.empty
        assocs = map (\n -> (Node.name n, n)) (replicate count node')
    in snd (Loader.assignIndices assocs)
121

    
122
-- | Checks whether a node has at least @size@ allocation units free
-- in every resource dimension.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    Node.availDisk node > size * Types.unitDsk &&
    Node.availMem node > size * Types.unitMem &&
    Node.availCpu node > size * Types.unitCpu
127

    
128
-- | Whether a balancing step can still improve the given table.
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac =
    isJust (Cluster.tryBalance tbl dm im evac 0 0)
130

    
131
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pri = Container.find pdx nl
      sec = Container.find sdx nl
      -- fresh index: one past the current maximum, or 0 when empty
      new_idx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = new_idx
                   , Instance.pNode = pdx
                   , Instance.sNode = sdx }
      pri' = Node.setPri pri inst'
      sec' = Node.setSec sec inst'
  in ( Container.addTwo pdx pri' sdx sec' nl
     , Container.add new_idx inst' il )
149

    
150
-- * Arbitrary instances
151

    
152
-- | A single character valid inside a DNS name component.
newtype DNSChar = DNSChar { dnsGetChar :: Char }
154

    
155
-- | Picks uniformly among the allowed DNS characters.
instance Arbitrary DNSChar where
    arbitrary =
        fmap DNSChar (elements (['a'..'z'] ++ ['0'..'9'] ++ "_-"))
159

    
160
-- | Generates a single DNS name component, 1 to 64 characters long.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len :: Gen [DNSChar]
  return (map dnsGetChar chars)
165

    
166

    
167
-- | Generates a fully-qualified name: a leading component followed by
-- 1-4 further dot-separated components.
getFQDN :: Gen String
getFQDN = do
  leading <- getName
  ncomps <- choose (1, 4)
  rest <- vector ncomps :: Gen [[DNSChar]]
  return . intercalate "." $ leading : map (map dnsGetChar) rest
174

    
175
-- | Generates a random instance; note the run state list deliberately
-- includes two invalid statuses.
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn
193

    
194
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
                     -- just by the max... constants)
        -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let scale mm = (mm * Types.unitMem, mm * Types.unitDsk, mm * Types.unitCpu)
      (base_mem, base_dsk, base_cpu) =
          maybe (0, 0, 0) scale min_multiplier
      (top_mem, top_dsk, top_cpu) =
          maybe (maxMem, maxDsk, maxCpu) scale max_multiplier
  name  <- getFQDN
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers n Container.empty
223

    
224
-- | An arbitrary node is simply an unconstrained one.
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing
227

    
228
-- | Arbitrary replace-disks mode, covering every constructor.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]
235

    
236
-- | Generates one of a fixed subset of opcodes, dispatching on the
-- opcode id to build the matching constructor.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    case op_id of
      "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
      "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
                 arbitrary arbitrary arbitrary
      "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
      "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
                 arbitrary
      _ -> fail "Wrong opcode"
255

    
256
-- | Uniform choice over all op statuses.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]
258

    
259
-- | Uniform choice over all job statuses.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]
261

    
262
-- | A ratio constrained to the closed interval [0, 1].
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = liftM SmallRatio (choose (0, 1))
267

    
268
-- * Actual tests
269

    
270
-- ** Utils tests
271

    
272
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat`
            (\l -> l /= [""] && all (notElem ',') l)) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst
278

    
279
-- | Split followed by join must reconstruct the original string.
prop_Utils_commaSplitJoin s =
    Utils.commaJoin (Utils.sepSplit ',' s) == s
281

    
282
-- | fromObjWithDefault, tested via the Maybe monad on an Integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key is replaced by the default
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- a present key is returned as-is, ignoring the default
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
         random_key (def_value + 1) == Just def_value
    where _types = def_value :: Integer
291

    
292
-- | Test list for the Utils module.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  ]
298

    
299
-- ** PeerMap tests
300

    
301
-- | Adding the same key/elem twice is the same as adding it once.
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          -- normalize the arbitrary map first
          puniq = PeerMap.accumArray const pmap
308

    
309
-- | Removing the same key twice is the same as removing it once.
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          -- normalize the arbitrary map first
          puniq = PeerMap.accumArray const pmap
315

    
316
-- | Looking up a key that was just removed must yield 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap
321

    
322
-- | An element that was just added must be found unchanged.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap
328

    
329
-- | maxElem must match the actual maximum of the values, or be 0 for
-- an empty map.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else maximum (map snd puniq)
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap
335

    
336
-- | List of tests for the PeerMap module.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
344

    
345
-- ** Container tests
346

    
347
-- | addTwo is commutative in its two pairs and idempotent.
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2
354

    
355
-- | nameOf must agree with the node's own name.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode
359

    
360
-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  othername `notElem` (map fst names ++ map snd names) ==>
  let nl = makeSmallCluster node cnt
      -- overwrite each node's name/alias with the generated pair
      retag ((name, alias), nn) =
          (Node.idx nn, nn { Node.name = name, Node.alias = alias })
      nodes' = map retag (zip names (Container.elems nl))
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     isNothing (Container.findByName nl' othername)
381

    
382
-- | List of tests for the Container module.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
387

    
388
-- ** Instance tests
389

    
390
-- Simple instance tests, we only have setter/getters
391

    
392
-- | A freshly-created instance has its alias equal to its name.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst
394

    
395
-- | setIdx stores exactly the given index.
prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)
398

    
399
-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name
404

    
405
-- | setAlias updates only the alias, leaving the name intact.
prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name
410

    
411
-- | setPri stores exactly the given primary node index.
prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)
414

    
415
-- | setSec stores exactly the given secondary node index.
prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)
418

    
419
-- | setBoth stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx
423

    
424
-- | Instances whose run state is one of the running states must
-- report as running.
prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            ((`elem` Instance.runningStates) . Instance.runSt))
    Instance.running
428

    
429
-- | Instances whose run state is not a running state must not report
-- as running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st
434

    
435
-- | Shrinking memory by one unit succeeds when there is room.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False
441

    
442
-- | Shrinking memory fails when there is less than one unit of slack.
prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem
446

    
447
-- | Shrinking vcpus by one unit succeeds when there is room.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False
453

    
454
-- | Shrinking vcpus fails when there is less than one unit of slack.
prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU
458

    
459
-- | Shrinking disk by one unit succeeds when there is room.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False
465

    
466
-- | Shrinking disk fails when there is less than one unit of slack.
prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk
470

    
471
-- | setMovable stores exactly the given flag.
prop_Instance_setMovable inst m =
    Instance.movable (Instance.setMovable inst m) == m
474

    
475
-- | List of tests for the Instance module.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
493

    
494
-- ** Text backend tests
495

    
496
-- Instance text loader tests
497

    
498
-- | Loading an instance from its text row must reproduce all fields;
-- loading with identical primary and secondary nodes must fail.
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal =
    pnode /= snode && pdx /= sdx ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- a single-node mapping when there is no secondary
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , snode::String
                 , autobal::Bool)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else sdx) &&
            Instance.autoBalance i == autobal &&
            isNothing fail1
534

    
535
-- | Loading an instance row of the wrong width must fail with the
-- specific error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> False
      Types.Bad msg -> "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn
541

    
542
-- | Loading a node from its text row must reproduce all fields;
-- negative values serialize as "?" and must yield an offline node.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0 then "?" else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo then "Y" else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
571

    
572
-- | Loading a node row of the wrong width must fail.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing (Text.loadNode Data.Map.empty fields)
574

    
575
-- | Serializing then re-loading a node must be the identity.
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc .
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }
581

    
582
-- | List of tests for the Text backend.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
589

    
590
-- ** Node tests
591

    
592
-- | setAlias changes only the alias, keeping the name.
prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name
597

    
598
-- | setOffline stores exactly the given status.
prop_Node_setOffline node status =
    Node.offline (Node.setOffline node status) == status
601

    
602
-- | setXmem stores exactly the given value.
prop_Node_setXmem node xm =
    Node.xMem (Node.setXmem node xm) == xm
605

    
606
-- | setMcpu stores exactly the given value.
prop_Node_setMcpu node mc =
    Node.mCpu (Node.setMcpu node mc) == mc
609

    
610
-- | Adding a primary instance whose memory exceeds the node's free
-- memory must fail with 'FailMem'.
prop_Node_addPriFM node inst =
    Instance.mem inst >= Node.fMem node && not (Node.failN1 node) ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailMem -> True
      _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          -- keep everything small except the memory, which overflows
          inst'' = inst' { Instance.mem = Instance.mem inst }
621

    
622
-- | Adding a primary instance whose disk exceeds the node's free disk
-- must fail with 'FailDisk'.
prop_Node_addPriFD node inst =
    Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailDisk -> True
      _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          -- keep everything small except the disk, which overflows
          inst'' = inst' { Instance.dsk = Instance.dsk inst }
631

    
632
-- | Adding a primary instance needing more vcpus than available must
-- fail with 'FailCPU'.
prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailCPU -> True
      _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          -- overcommit the cpus by a strictly positive amount
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
640

    
641
-- | Adding a secondary instance with too much memory or disk must be
-- rejected.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
649

    
650
-- | Checks for memory reservation changes.
prop_Node_rMem inst =
    forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.autoBalance = False }
        -- the two instances are now identical except for autoBalance
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
        node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             printTestCase "Consistency checks failed" $
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- not related to rMem, but a convenient place to check
             inst_idx `elem` Node.sList a_ab &&
             inst_idx `notElem` Node.sList d_ab
         x -> printTestCase ("Failed to add/remove instances: " ++ show x)
              False
681

    
682
-- | Check mdsk setting: the low-disk watermark and available disk
-- stay within sane bounds, and the ratio itself is stored.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx
693

    
694
-- | Adding then removing a tag set over an empty map is a no-op.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty
698

    
699
-- | A map that already holds some tags must reject re-adding at
-- least one of them.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags
703

    
704
-- | Every default node field must have a known header and a known
-- formatted value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField
708

    
709

    
710
-- | Sanity checks on computeGroups: the result partitions the input,
-- each group is homogeneous, group uuids are unique, and non-empty
-- input yields a non-empty grouping.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      uuids = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub uuids) == length uuids &&
     (null nodes || not (null ng))
717

    
718
-- | List of tests for the Node module.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
734

    
735

    
736
-- ** Cluster tests
737

    
738
-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't expect exactly 0 as floating point errors accumulate;
    -- this bound is far below the default score in CLI.hs
    in score <= 1e-12
751

    
752
-- | Check that cluster stats are sane: allocatable disk is
-- non-negative and no larger than free disk.
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats
763

    
764
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node) &&
    not (Node.failN1 node) &&
    Node.availDisk node > 0 &&
    Node.availMem node > 0 ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
            Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               -- exactly one solution, and it needs no rebalancing
               [(xnl, xi, _, cv)] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True True False)
               _ -> False
787

    
788
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node) &&
    not (Node.failN1 node) &&
    isNodeBig node 4 ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
            Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) ->
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats
807

    
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        -- shrink the instance so it is guaranteed to fit on the node
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               [(xnl, xi, _, _)] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   -- evacuation away from the chosen secondary must
                   -- yield some solution
                   in isJust (Cluster.tryEvac xnl il' [Instance.idx xi] [sdx])
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        -- hold one node back, fill the rest, then re-add it empty
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
        Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   -- the re-added empty node must enable a rebalance
                   in canBalance tbl True True False

-- | Checks consistency.
--
-- Instances whose primary and secondary live in the same group must
-- not be reported as split; an instance whose secondary is in another
-- group (node3', group 1) must be.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      -- move the third node into a different group
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- | For now, we only test that we don't lose instances during the split.
--
-- After assigning 0-100 instances on a 2-node cluster, splitting by
-- group must preserve the total instance count, and every node in a
-- split part must belong to that part's group.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni

-- | List of the cluster property tests.
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

-- ** OpCodes tests

-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  -- the unused binding only pins the property's argument type
  where _types = op::OpCodes.OpCode

-- | List of the opcode property tests.
testOpCodes =
  [ run prop_OpCodes_serialization
  ]

-- ** Jobs tests

-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  -- the unused binding only pins the property's argument type
  where _types = os::Jobs.OpStatus

-- | Check that job status serialization is idempotent.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  -- the unused binding only pins the property's argument type
  where _types = js::Jobs.JobStatus

-- | List of the job property tests.
testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

-- ** Loader tests

-- | Check that node lookup behaves exactly like a plain map lookup.
prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

-- | Check that instance lookup behaves exactly like a plain map lookup.
prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti

-- | Check that index assignment is dense: one association and one
-- container entry per node, with indices covering 0..n-1.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  -- for a non-empty input, indices must be contiguous from zero
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
--
-- Merging a cluster that holds nodes but no instances must succeed,
-- keep the instance container empty, and leave every node's primary
-- instance list empty.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

-- | List of the loader property tests.
testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]