Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / QC.hs @ 2aa65bf0

History | View | Annotate | Download (30 kB)

1
{-| Unittests for ganeti-htools
2

    
3
-}
4

    
5
{-
6

    
7
Copyright (C) 2009, 2010, 2011 Google Inc.
8

    
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
13

    
14
This program is distributed in the hope that it will be useful, but
15
WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
General Public License for more details.
18

    
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22
02110-1301, USA.
23

    
24
-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    ) where
38

    
39
import Test.QuickCheck
40
import Test.QuickCheck.Batch
41
import Data.List (findIndex, intercalate, nub)
42
import Data.Maybe
43
import Control.Monad
44
import qualified Text.JSON as J
45
import qualified Data.Map
46
import qualified Data.IntMap as IntMap
47
import qualified Ganeti.OpCodes as OpCodes
48
import qualified Ganeti.Jobs as Jobs
49
import qualified Ganeti.Luxi
50
import qualified Ganeti.HTools.CLI as CLI
51
import qualified Ganeti.HTools.Cluster as Cluster
52
import qualified Ganeti.HTools.Container as Container
53
import qualified Ganeti.HTools.ExtLoader
54
import qualified Ganeti.HTools.IAlloc as IAlloc
55
import qualified Ganeti.HTools.Instance as Instance
56
import qualified Ganeti.HTools.Loader as Loader
57
import qualified Ganeti.HTools.Luxi
58
import qualified Ganeti.HTools.Node as Node
59
import qualified Ganeti.HTools.Group as Group
60
import qualified Ganeti.HTools.PeerMap as PeerMap
61
import qualified Ganeti.HTools.Simu
62
import qualified Ganeti.HTools.Text as Text
63
import qualified Ganeti.HTools.Types as Types
64
import qualified Ganeti.HTools.Utils as Utils
65
import qualified Ganeti.HTools.Version
66

    
67
-- * Constants

-- | Maximum memory (1TiB, somewhat random value).
-- NOTE(review): 1024 * 1024 equals 1TiB only if the unit is MiB —
-- presumably memory values are in MiB; confirm against Node/Instance.
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value); same presumed MiB unit.
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value).
maxCpu :: Int
maxCpu = 1024

-- | The default node group used by the tests, forced to index 0.
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
               Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred

-- | A group container holding only the default group.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Association from the default group's UUID to its index.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
91

    
92
-- * Helper functions

-- | Simple checker for whether an 'Types.OpResult' is a failure
-- ('Types.OpFail') or a success.
isFailure :: Types.OpResult a -> Bool
isFailure op =
    case op of
      Types.OpFail _ -> True
      _ -> False
98

    
99
-- | Update an instance to be smaller than a node: each resource
-- (memory, disk, vcpus) is set to half the node's available amount.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec (memory, disk, vcpus); name,
-- run state, tags and node indices are fixed placeholder values.
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] (-1) (-1)
109

    
110
-- | Create a small cluster by repeating a single node spec
-- 'count' times and assigning fresh indices.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count = nlst
    where fn = Node.buildPeers node Container.empty
          namelst = map (\n -> (Node.name n, n)) (replicate count fn)
          (_, nlst) = Loader.assignIndices namelst
117

    
118
-- | Checks if a node is "big" enough, i.e. has more than 'size'
-- units of each resource (disk, memory, cpu) available.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    and [ Node.availDisk node > size * Types.unitDsk
        , Node.availMem node > size * Types.unitMem
        , Node.availCpu node > size * Types.unitCpu
        ]
123

    
124
-- | Whether the balancing algorithm finds any improving move for the
-- given table (disk-move and evacuation-mode flags as given).
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac =
    case Cluster.tryBalance tbl dm evac 0 0 of
      Just _ -> True
      Nothing -> False
126

    
127
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      -- the new instance gets the next free index
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      -- record the instance on both its primary and secondary node
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
145

    
146
-- * Arbitrary instances

-- copied from the introduction to quickcheck; restricts generated
-- chars to the '\32'..'\128' range so strings stay mostly printable
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

-- | A single character valid in DNS names: lowercase letter, digit,
-- underscore or dash.
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)
157

    
158
-- | Generates a DNS-like name of length 1-64.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len::Gen [DNSChar]
  return (map dnsGetChar chars)
163

    
164

    
165
-- | Generates a fully-qualified domain name: a leading name followed
-- by 1-4 further dot-separated components.
getFQDN :: Gen String
getFQDN = do
  first <- getName
  ncomps <- choose (1, 4)
  comps <- vector ncomps::Gen [[DNSChar]]
  let rest = map (map dnsGetChar) comps
  -- ncomps >= 1, so this equals first ++ "." ++ intercalate "." rest
  return (intercalate "." (first:rest))
172

    
173
-- let's generate a random instance: random FQDN name, resources
-- within the test bounds, and a run state drawn from both valid and
-- invalid values so both parsing paths are exercised
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      -- mix of recognised and unknown run states
      run_st <- elements ["ERROR_up", "ERROR_down", "ADMIN_down"
                         , "ERROR_nodedown", "ERROR_nodeoffline"
                         , "running"
                         , "no_such_status1", "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] pn sn
187

    
188
-- and a random node: free amounts are constrained to be no larger
-- than the corresponding totals
instance Arbitrary Node.Node where
    arbitrary = do
      name <- getFQDN
      mem_t <- choose (0, maxMem)
      mem_f <- choose (0, mem_t)
      mem_n <- choose (0, mem_t - mem_f)
      dsk_t <- choose (0, maxDsk)
      dsk_f <- choose (0, dsk_t)
      cpu_t <- choose (0, maxCpu)
      offl <- arbitrary
      -- the trailing 0 is presumably the group index — confirm
      -- against Node.create's signature
      let n = Node.create name (fromIntegral mem_t) mem_n mem_f
              (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
              0
          n' = Node.buildPeers n Container.empty
      return n'
204

    
205
-- replace disks: any of the four replacement modes
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

-- | Generates one of the four supported opcode variants with
-- arbitrary field values.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpFailoverInstance arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM3 OpCodes.OpMigrateInstance arbitrary arbitrary arbitrary
        -- unreachable: op_id is always drawn from the list above
        _ -> fail "Wrong opcode")
231

    
232
-- | Any of the enumeration values of the opcode status.
instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

-- | Any of the enumeration values of the job status.
instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]
237

    
238
-- * Actual tests
239

    
240
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should restore the original
-- list (a round-trip property, not idempotency).
prop_Utils_commaJoinSplit lst = lst /= [""] &&
                                all (notElem ',') lst ==>
                                Utils.sepSplit ',' (Utils.commaJoin lst) == lst
245
-- | Split and join should always reconstruct the original string
-- (the round-trip in the other direction).
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | The list of Utils test cases.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  ]
252

    
253
-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          -- puniq presumably collapses duplicate keys via accumArray
          -- — confirm against PeerMap.accumArray
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

-- | The list of PeerMap test cases.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
295

    
296
-- Container tests

-- | addTwo should be insensitive to the order of the two pairs and
-- idempotent (re-adding the same pairs changes nothing).
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

-- | nameOf should return the name of the element stored at an index.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode
310

    
311
-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  othername `notElem` (map fst names ++ map snd names) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- give each node a unique (name, alias) pair from the generated list
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     isNothing (Container.findByName nl' othername)
332

    
333
-- | The list of Container test cases.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
338

    
339
-- Simple instance tests, we only have setter/getters

-- | On creation, an instance's alias equals its name.
-- NOTE(review): the function name has a typo ("creat"); kept as-is
-- because it is referenced from testInstance below.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

-- | setIdx stores the given index.
prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

-- | setAlias updates only the alias, leaving the name unchanged.
prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

-- | setPri stores the primary node index.
prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

-- | setSec stores the secondary node index.
prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

-- | setBoth stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx
372

    
373
-- | A run state listed in runningStates implies the running flag.
prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `elem` Instance.runningStates ==> run_st

-- | A run state not listed in runningStates implies not running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

-- | Shrinking memory (when possible) removes exactly one memory unit.
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

-- | Shrinking memory when below two units' worth must fail.
prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailMem

-- | Shrinking vcpus (when possible) removes exactly one cpu unit.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

-- | Shrinking vcpus when below two units' worth must fail.
prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailCPU

-- | Shrinking disk (when possible) removes exactly one disk unit.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

-- | Shrinking disk when below two units' worth must fail.
prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailDisk
417

    
418
-- | setMovable stores the given movability flag.
prop_Instance_setMovable inst m =
    Instance.movable (Instance.setMovable inst m) == m
421

    
422
-- | The list of Instance test cases.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
440

    
441
-- Instance text loader tests

-- | Serialised instance fields should load back with the same
-- attributes; loading with identical primary and secondary nodes
-- must fail.
prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- make sure the secondary index differs from the primary
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, snode, tags]::
               Maybe (String, Instance.Instance)
        -- using the same node as primary and secondary must be rejected
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, pnode, tags]::
               Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            (Instance.name i == name &&
             Instance.vcpus i == vcpus &&
             Instance.mem i == mem &&
             Instance.pNode i == pdx &&
             Instance.sNode i == (if null snode
                                  then Node.noSecondary
                                  else rsdx) &&
             isNothing fail1)
478

    
479
-- | Loading an instance line with a wrong number of fields must fail.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 8 ==> isNothing $ Text.loadInst nl fields
    where nl = Data.Map.fromList ktn
482

    
483
-- | Node serialisation: valid values round-trip into the node's
-- attributes; any negative (serialised as "?") value or the offline
-- flag must result in an offline node.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc
512

    
513
-- | Loading a node line with a wrong number of fields must fail.
-- NOTE(review): the successful load above passes 9 fields (including
-- the group id), so the /= 8 guard presumably matches loadNode's
-- field count without the optional group — confirm against
-- Text.loadNode before changing.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
515

    
516
-- | Serialising and re-loading a node should give back the same node.
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }
522

    
523
-- | The list of Text-backend test cases.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
530

    
531
-- Node tests

-- | setAlias changes only the alias, not the name.
prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

-- | setOffline stores the given offline flag.
prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

-- | setXmem stores the given value in the xMem field.
prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

-- | setMcpu stores the given value in the mCpu field.
prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc
550

    
551
-- | Check that an instance add with too high memory will be rejected
-- with a memory failure.
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          -- shrink all resources, then restore only the oversized one
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | Check that an instance add with too high disk will be rejected
-- with a disk failure.
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | Check that an instance add with too many vcpus will be rejected
-- with a CPU failure.
prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailCPU -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }

-- | Check that adding as secondary with too high memory or disk will
-- be rejected.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
589

    
590
-- | A ratio in the [0, 1] interval, used for the mdsk property below.
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = liftM SmallRatio (choose (0, 1))
595

    
596
-- | Check mdsk setting: the reserved low-disk value must stay within
-- the node's totals and the ratio must be stored as given.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx
607

    
608
-- Check tag maps

-- | Adding then deleting the same tags leaves the map unchanged.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

-- | Re-adding an already-present tag must be rejected.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

-- | Every default field must have a known header and a known value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField
622

    
623

    
624
-- | Grouping nodes by group UUID preserves the total node count, puts
-- only matching nodes in each group, and yields unique group UUIDs;
-- a non-empty node list must produce a non-empty grouping.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))
631

    
632
-- | The list of Node test cases.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
647

    
648

    
649
-- Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous
-- cluster (all nodes identical).
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-15

-- | Check that cluster stats are sane: allocatable disk must be
-- non-negative and no larger than the free disk.
prop_CStats_sane node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats
673

    
674
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             -- exactly one solution is expected; after applying it,
             -- the cluster must not admit a further balancing move
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True False)
               _ -> False

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
        Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                      IntMap.size il' == length ixes &&
                                      length ixes == length cstats
717

    
718
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, _):[] ->
                   -- evacuate the instance's secondary node
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            && not (isNodeBig node 8)
            ==>
    let nl = makeSmallCluster node count
        -- hold back the highest-index node and re-add it after
        -- filling the remaining nodes
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
        Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   in canBalance tbl True False
765

    
766
-- | Checks consistency: instances spanning two groups must be
-- reported as split, instances within one group must not.
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      -- move the third node into a different group
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni
790

    
791
-- | The list of Cluster test cases.
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]
801

    
802
-- | Check that opcode serialization round-trips
-- (readJSON . showJSON == id)

prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

-- | The list of OpCodes test cases.
testOpCodes =
  [ run prop_OpCodes_serialization
  ]
813

    
814
-- | Check that (queued) opcode status serialization round-trips.
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

-- | Same round-trip check for the job status.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

-- | The list of Jobs test cases.
testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]
831

    
832
-- | Loader tests

-- | lookupNode should behave like a plain map lookup.
prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

-- | lookupInstance should behave like a plain map lookup.
prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti
841

    
842
-- | assignIndices gives every node a contiguous, 0-based index and
-- returns an association map of the same size.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
849

    
850

    
851
-- | Checks that the number of primary instances recorded on the nodes
-- is zero after merging an instance-free cluster.
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

-- | The list of Loader test cases.
testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]