Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / QC.hs @ a1cd7c1e

History | View | Annotate | Download (32.9 kB)

1
{-| Unittests for ganeti-htools
2

    
3
-}
4

    
5
{-
6

    
7
Copyright (C) 2009, 2010, 2011 Google Inc.
8

    
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
13

    
14
This program is distributed in the hope that it will be useful, but
15
WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
General Public License for more details.
18

    
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22
02110-1301, USA.
23

    
24
-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    ) where
38

    
39
import Test.QuickCheck
40
import Data.List (findIndex, intercalate, nub, isPrefixOf)
41
import Data.Maybe
42
import Control.Monad
43
import qualified Text.JSON as J
44
import qualified Data.Map
45
import qualified Data.IntMap as IntMap
46
import qualified Ganeti.OpCodes as OpCodes
47
import qualified Ganeti.Jobs as Jobs
48
import qualified Ganeti.Luxi
49
import qualified Ganeti.HTools.CLI as CLI
50
import qualified Ganeti.HTools.Cluster as Cluster
51
import qualified Ganeti.HTools.Container as Container
52
import qualified Ganeti.HTools.ExtLoader
53
import qualified Ganeti.HTools.IAlloc as IAlloc
54
import qualified Ganeti.HTools.Instance as Instance
55
import qualified Ganeti.HTools.Loader as Loader
56
import qualified Ganeti.HTools.Luxi
57
import qualified Ganeti.HTools.Node as Node
58
import qualified Ganeti.HTools.Group as Group
59
import qualified Ganeti.HTools.PeerMap as PeerMap
60
import qualified Ganeti.HTools.Rapi
61
import qualified Ganeti.HTools.Simu
62
import qualified Ganeti.HTools.Text as Text
63
import qualified Ganeti.HTools.Types as Types
64
import qualified Ganeti.HTools.Utils as Utils
65
import qualified Ganeti.HTools.Version
66
import qualified Ganeti.Constants as C
67

    
68
-- | Run a QuickCheck property with the given arguments, returning the
-- detailed result.
run :: Testable prop => prop -> Args -> IO Result
run prop args = quickCheckWithResult args prop
70

    
71
-- * Constants
72

    
73
-- | Upper bound on generated node memory (1TiB, somewhat arbitrary).
maxMem :: Int
maxMem = 1024 * 1024

-- | Upper bound on generated node disk (8TiB, somewhat arbitrary).
maxDsk :: Int
maxDsk = 8 * 1024 * 1024

-- | Upper bound on generated node CPUs (1024, somewhat arbitrary).
maxCpu :: Int
maxCpu = 1024
84

    
85
-- | The default node group used by the tests, at index 0.
defGroup :: Group.Group
defGroup =
    Group.setIdx
        (Group.create "default" Utils.defaultGroupID Types.AllocPreferred)
        0

-- | A group container holding only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | A map from the default group's UUID to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
95

    
96
-- * Helper functions
97

    
98
-- | Simple checker for whether an 'Types.OpResult' denotes failure.
isFailure :: Types.OpResult a -> Bool
isFailure r = case r of
                Types.OpFail _ -> True
                _              -> False
102

    
103
-- | Update an instance so that each of its resources (memory, disk,
-- vcpus) is exactly half of the node's corresponding available
-- amount, ensuring the instance fits on the node.
setInstanceSmallerThanNode :: Node.Node -> Instance.Instance
                           -> Instance.Instance
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }
109

    
110
-- | Create a running, auto-balanced, unnamed instance with the given
-- spec and no nodes assigned yet (-1 for both primary and secondary).
createInstance :: Int -> Int -> Int -> Instance.Instance
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
113

    
114
-- | Create a small cluster by replicating a single node spec @count@
-- times and assigning fresh indices.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let base = Node.buildPeers node Container.empty
        named = [(Node.name n, n) | n <- replicate count base]
        (_, nodelist) = Loader.assignIndices named
    in nodelist
121

    
122
-- | Checks whether a node has more than @size@ standard allocation
-- units of every resource available.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]
127

    
128
-- | Whether 'Cluster.tryBalance' can find any improving move for the
-- given table (with zero score/move thresholds).
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac =
    isJust (Cluster.tryBalance tbl dm im evac 0 0)
130

    
131
-- | Assign a fresh instance to the given primary/secondary nodes of a
-- cluster.  This is raw bookkeeping, not allocation: no resource
-- checks are performed.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pri = Container.find pdx nl
      sec = Container.find sdx nl
      -- the next free instance index
      new_idx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = new_idx
                   , Instance.pNode = pdx
                   , Instance.sNode = sdx }
      pri' = Node.setPri pri inst'
      sec' = Node.setSec sec inst'
  in ( Container.addTwo pdx pri' sdx sec' nl
     , Container.add new_idx inst' il )
149

    
150
-- * Arbitrary instances
151

    
152
-- | A single character valid inside a DNS name component.
newtype DNSChar = DNSChar { dnsGetChar :: Char }

instance Arbitrary DNSChar where
    arbitrary =
        liftM DNSChar $ elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
157

    
158
-- | Generate a single DNS name component, 1-64 characters long.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len :: Gen [DNSChar]
  return (map dnsGetChar chars)

-- | Generate a fully-qualified name: a leading component plus 1-4
-- further dot-separated components.
getFQDN :: Gen String
getFQDN = do
  leading <- getName
  ncomps <- choose (1, 4)
  comps <- vector ncomps :: Gen [[DNSChar]]
  return . intercalate "." $ leading : map (map dnsGetChar) comps
172

    
173
-- | Generate a random instance: a random FQDN, resources within the
-- global caps, and a run state drawn from both valid and invalid
-- status strings (so that bad-status handling is exercised too).
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn
191

    
192
-- | Generate an arbitrary node, with per-resource totals bounded by
-- the optional min/max multipliers of the standard allocation unit.
genNode :: Maybe Int -> Maybe Int -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let mkBounds mm = (mm * Types.unitMem, mm * Types.unitDsk,
                     mm * Types.unitCpu)
      (base_mem, base_dsk, base_cpu) =
          maybe (0, 0, 0) mkBounds min_multiplier
      (top_mem, top_dsk, top_cpu) =
          maybe (maxMem, maxDsk, maxCpu) mkBounds max_multiplier
  name  <- getFQDN
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let node = Node.create name (fromIntegral mem_t) mem_n mem_f
             (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers node Container.empty
217

    
218
-- | A fully random node, with no bounds on its resources.
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing
221

    
222
-- | Arbitrary replace-disks mode, covering all four variants.
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary =
      elements [ OpCodes.ReplaceOnPrimary
               , OpCodes.ReplaceOnSecondary
               , OpCodes.ReplaceNewSecondary
               , OpCodes.ReplaceAuto
               ]
229

    
230
-- | Arbitrary opcode: pick one of the supported opcode IDs, then fill
-- in its fields with arbitrary values.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    case op_id of
      "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
      "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
                 arbitrary arbitrary arbitrary
      "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
      "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
                 arbitrary
      _ -> fail "Wrong opcode"
249

    
250
-- | Arbitrary operation status, over the whole enumeration.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

-- | Arbitrary job status, over the whole enumeration.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]
255

    
256
-- * Actual tests
257

    
258
-- | If the list is not just a single empty string, and none of its
-- elements contain commas, then join followed by split is the
-- identity.
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat`
            (\l -> l /= [""] && all (notElem ',') l)) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- | Splitting on commas and re-joining always restores the original
-- string.
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | Test 'Utils.fromObjWithDefault' via the Maybe monad and Integer
-- values.
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key yields the default...
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- ...while a present key yields its stored value, not the default
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
         random_key (def_value+1) == Just def_value
        where _types = def_value :: Integer

-- | The list of Utils tests.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  ]
283

    
284
-- | Adding the same element twice is the same as adding it once.
prop_PeerMap_addIdempotent pmap key em =
    addem puniq == addem (addem puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          addem = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Removing a key twice is the same as removing it once.
prop_PeerMap_removeIdempotent pmap key =
    rm puniq == rm (rm puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          rm = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Looking up a missing key returns 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | An element that was just added is found with its value.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | 'PeerMap.maxElem' returns the maximum element, or 0 for an empty
-- map.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq
                             then 0
                             else maximum (map snd puniq)
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

-- | The list of PeerMap tests.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
326

    
327
-- * Container tests

-- | 'Container.addTwo' does not depend on the order of its two pairs
-- and is idempotent.
prop_Container_addTwo cdata i1 i2 =
    add2 i1 i2 cont == add2 i2 i1 cont &&
       add2 i1 i2 cont == add2 i1 i2 (add2 i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          add2 x1 x2 = Container.addTwo x1 x1 x2 x2

-- | 'Container.nameOf' returns the name of the looked-up node.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      target = head (Container.elems nl)
  in Container.nameOf nl (Node.idx target) == Node.name target

-- | In a cluster where all names and aliases are unique, any node can
-- be found by either its name or its alias, and lookup of an unused
-- name fails.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  othername `notElem` (map fst names ++ map snd names) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      renamed = map (\((name, alias), nn) -> (Node.idx nn,
                                              nn { Node.name = name,
                                                   Node.alias = alias }))
               (zip names nodes)
      nl' = Container.fromList renamed
      target = snd (renamed !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     isNothing (Container.findByName nl' othername)

-- | The list of Container tests.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
369

    
370
-- * Simple instance tests (setters/getters only)

-- | At creation time, the instance alias equals its name.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

-- | 'Instance.setIdx' stores the given index.
prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

-- | 'Instance.setName' updates both name and alias.
prop_Instance_setName inst name =
    Instance.name inst' == name &&
    Instance.alias inst' == name
    where _types = (inst::Instance.Instance, name::String)
          inst' = Instance.setName inst name

-- | 'Instance.setAlias' updates only the alias, not the name.
prop_Instance_setAlias inst name =
    Instance.name inst' == Instance.name inst &&
    Instance.alias inst' == name
    where _types = (inst::Instance.Instance, name::String)
          inst' = Instance.setAlias inst name

-- | 'Instance.setPri' stores the primary node index.
prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

-- | 'Instance.setSec' stores the secondary node index.
prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

-- | 'Instance.setBoth' stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode updated == pdx && Instance.sNode updated == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          updated = Instance.setBoth inst pdx sdx
403

    
404
-- | Instances whose run state is one of the running states report as
-- running.
prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            ((`elem` Instance.runningStates) . Instance.runSt))
    Instance.running

-- | Instances whose run state is not a running state do not report
-- as running.
prop_Instance_runStatus_False inst =
    Instance.runSt inst `notElem` Instance.runningStates ==>
    not (Instance.running inst)

-- | Shrinking memory on an instance with enough memory succeeds and
-- removes exactly one memory unit.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok shrunk ->
              Instance.mem shrunk == Instance.mem inst - Types.unitMem
          _ -> False

-- | Shrinking memory when less than two units are present fails.
prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    Types.isBad (Instance.shrinkByType (inst { Instance.mem = mem })
                 Types.FailMem)

-- | Shrinking vcpus on an instance with enough vcpus succeeds and
-- removes exactly one cpu unit.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok shrunk ->
              Instance.vcpus shrunk == Instance.vcpus inst - Types.unitCpu
          _ -> False

-- | Shrinking vcpus when less than two units are present fails.
prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    Types.isBad (Instance.shrinkByType (inst { Instance.vcpus = vcpus })
                 Types.FailCPU)

-- | Shrinking disk on an instance with enough disk succeeds and
-- removes exactly one disk unit.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok shrunk ->
              Instance.dsk shrunk == Instance.dsk inst - Types.unitDsk
          _ -> False

-- | Shrinking disk when less than two units are present fails.
prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    Types.isBad (Instance.shrinkByType (inst { Instance.dsk = dsk })
                 Types.FailDisk)

-- | 'Instance.setMovable' stores the movable flag.
prop_Instance_setMovable inst m =
    Instance.movable (Instance.setMovable inst m) == m
454

    
455
-- | The list of Instance tests.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
473

    
474
-- * Instance text loader tests

-- | Loading hand-serialized instance fields reconstructs the same
-- instance, while reusing the primary node as secondary must fail.
prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal =
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- make sure the secondary index differs from the primary one
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , snode::String
                 , autobal::Bool)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else rsdx) &&
            Instance.auto_balance i == autobal &&
            isNothing fail1
514

    
515
-- | Loading an instance from a field list of the wrong length fails
-- with the expected error message.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> False
      Types.Bad msg -> "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn
521

    
522
-- | Loading hand-serialized node fields reconstructs the node; any
-- unparsable ("?") resource or the offline flag yields an offline
-- node.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
551

    
552
-- | Loading a node from a field list whose length differs from the
-- expected 9 fields (cf. 'prop_Text_Load_Node' and the instance
-- counterpart above) must fail.  The previous bound of 8 both
-- excluded genuinely-invalid 8-field lists and wrongly required
-- well-formed 9-field lists to fail.
prop_Text_Load_NodeFail fields =
    length fields /= 9 ==> isNothing $ Text.loadNode Data.Map.empty fields
554

    
555
-- | Serializing a node and loading it back gives the same node.
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc .
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 and offline to the values loadNode computes by
    -- default, since they are not round-tripped through the format
    where n = node { Node.failN1 = True, Node.offline = False }
561

    
562
-- | The list of text loader tests.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
569

    
570
-- * Node tests

-- | 'Node.setAlias' changes only the alias, not the name.
prop_Node_setAlias node name =
    Node.name n' == Node.name node &&
    Node.alias n' == name
    where _types = (node::Node.Node, name::String)
          n' = Node.setAlias node name

-- | 'Node.setOffline' stores the offline flag.
prop_Node_setOffline node status =
    Node.offline (Node.setOffline node status) == status

-- | 'Node.setXmem' stores the given value in the xMem field.
prop_Node_setXmem node xm =
    Node.xMem (Node.setXmem node xm) == xm

-- | 'Node.setMcpu' stores the given value in the mCpu field.
prop_Node_setMcpu node mc =
    Node.mCpu (Node.setMcpu node mc) == mc
589

    
590
-- | Adding a primary instance needing at least as much memory as the
-- node has free must fail with 'Types.FailMem'.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Adding a primary instance needing at least as much disk as the
-- node has free must fail with 'Types.FailDisk'.
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | Adding a primary instance needing more vcpus than the node has
-- available must fail with 'Types.FailCPU'.
prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
        case Node.addPri node inst'' of
          Types.OpFail Types.FailCPU -> True
          _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }

-- | Adding a secondary instance with too high memory or disk must be
-- rejected.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
627

    
628
-- | Checks the memory reservation changes on a secondary node when
-- adding and removing instances with and without auto_balance.
prop_Node_rMem node inst =
    -- ab = auto_balance, nb = non-auto_balance; we use -1 as the
    -- primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.auto_balance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.auto_balance = False }
        -- two instances, identical except for the auto_balance
        -- attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             -- only the auto_balance instance reserves memory, and
             -- removing it restores the original reservation
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- not related to rMem, but as good a place as any to
             -- check the secondary instance list too
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)
         _ -> False
656

    
657
-- | A Double drawn from the closed interval [0, 1].
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = liftM SmallRatio (choose (0, 1))
662

    
663
-- | Check the invariants that hold after setting the minimum disk
-- ratio via 'Node.setMdsk'.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == ratio
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node ratio
          SmallRatio ratio = mx
674

    
675
-- * Tag map tests

-- | Adding tags to an empty map and deleting them again yields the
-- empty map.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

-- | Every tag already present in a map is rejected when re-added.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

-- | Every default field has a known header and a known value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField

-- | 'Node.computeGroups' partitions the node list: group sizes add
-- up, every node sits in its own group, group uuids are unique, and
-- non-empty input gives a non-empty result.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))
698

    
699
-- | The list of Node tests.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
715

    
716

    
717
-- Cluster tests
718

    
719
-- | The cluster score of a homogeneous (replicated single-node)
-- cluster must be close to zero.
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let base = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count base::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't expect exactly 0 as floating point errors accumulate;
    -- this should still be much lower than the default score in CLI.hs
    in score <= 1e-12

-- | Cluster statistics are sane: allocatable disk is non-negative and
-- never exceeds free disk.
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let base = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count base::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats
743

    
744
-- | Allocating a single instance that fits on a node must succeed,
-- and the resulting cluster must need no rebalancing.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               [(xnl, xi, _, cv)] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True True False)
               _ -> False
767

    
768
-- | On a 2-5 node cluster, tiered allocation of a random instance
-- spec (whatever the original spec was) must succeed on either one or
-- two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
        Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) ->
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats
787

    
788
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it from its secondary node.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node) &&
    not (Node.failN1 node) &&
    isNodeBig node 4 ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        small = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il small of
         Types.Bad _ -> False
         Types.Ok sol ->
             case Cluster.asSolutions sol of
               [(xnl, xi, _, _)] ->
                   -- evacuating the freshly-placed instance away from
                   -- its secondary node must yield a solution
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in isJust (Cluster.tryEvac xnl il' [Instance.idx xi] [sdx])
               _ -> False
-- | Check that after filling a cluster with instances, re-adding a
-- previously held-back (empty) node results in a valid rebalance.
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        -- hold back the highest-indexed node; allocate on the rest
        (spare, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        tmpl = createInstance Types.unitMem Types.unitDsk Types.unitCpu
        result = do
          alloc <- Cluster.genAllocNodes defGroupList nl' 2 True
          Cluster.iterateAlloc nl' il tmpl alloc [] []
    in case result of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
             -- with the spare node back in, a balancing move must exist
             let ynl = Container.add (Node.idx spare) spare xnl
                 tbl = Cluster.Table ynl il' (Cluster.compCV ynl) []
             in canBalance tbl True True False
-- | Checks that split-instance detection flags exactly the instances
-- whose primary and secondary nodes live in different node groups.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [na, nb, nc] = Container.elems nl
      -- move the third node into a different group
      nc' = nc { Node.group = 1 }
      nl' = Container.add (Node.idx nc') nc' nl
      split = Cluster.findSplitInstances nl' . Container.fromList
      sameGroup = Instance.setBoth inst (Node.idx na) (Node.idx nb)
      noSec = Instance.setBoth inst (Node.idx na) Node.noSecondary
      crossGroup = Instance.setBoth inst (Node.idx na) (Node.idx nc)
  in null (split [(0, sameGroup)]) &&
     null (split [(0, noSec)]) &&
     not (null (split [(0, crossGroup)]))
-- | For now, we only test that we don't lose instances during the
-- split, and that each resulting group contains only its own nodes.
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let base = makeSmallCluster node 2
      place (ns, is) _ = assignInstance ns is inst 0 1
      (nl', il') = foldl place (base, Container.empty) [1..icnt]
      groups = Cluster.splitCluster nl' il'
      groupOk (guuid, (gnl, _)) =
        all ((== guuid) . Node.group) (Container.elems gnl)
  in sum [Container.size gil | (_, (_, gil)) <- groups] == icnt &&
     all groupOk groups
-- | The list of cluster-related QuickCheck tests.
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]
-- | Check that opcode serialization round-trips: decoding an encoded
-- opcode must yield the original value.
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Ok decoded -> decoded == op
    J.Error _ -> False
  where _types = op::OpCodes.OpCode
-- | The list of opcode serialization tests.
testOpCodes =
  [ run prop_OpCodes_serialization
  ]
-- | Check that (queued) opcode status serialization round-trips
-- through JSON.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Ok decoded -> decoded == os
    J.Error _ -> False
  where _types = os::Jobs.OpStatus
-- | Check that (queued) job status serialization round-trips
-- through JSON.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Ok decoded -> decoded == js
    J.Error _ -> False
  where _types = js::Jobs.JobStatus
-- | The list of job\/opcode status serialization tests.
testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]
-- | Loader tests

-- | 'Loader.lookupNode' must agree with a plain 'Data.Map.lookup'.
prop_Loader_lookupNode ktn inst node =
  let nl = Data.Map.fromList ktn
  in Loader.lookupNode nl inst node == Data.Map.lookup node nl
-- | 'Loader.lookupInstance' must agree with a plain 'Data.Map.lookup'.
prop_Loader_lookupInstance kti inst =
  let il = Data.Map.fromList kti
  in Loader.lookupInstance il inst == Data.Map.lookup inst il
-- | Checks that 'Loader.assignIndices' builds a complete name
-- association and a dense 0..n-1 index space for the given nodes.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  -- guard the partial 'maximum': on a non-empty input the largest
  -- assigned index must be exactly (length nodes - 1); the original
  -- `if c then x else True` anti-pattern is the equivalent disjunction
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-- | Checks that merging an instance-free cluster leaves every node
-- with an empty primary-instance list and produces no instances.
prop_Loader_mergeData ns =
  let na = Container.fromList [(Node.idx n, n) | n <- ns]
  in case Loader.mergeData [] [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      all (null . Node.pList) (Container.elems nl) &&
      null (Container.elems il)
-- | The list of Loader module tests.
testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]