{-| Unittests for ganeti-htools

-}

{-

Copyright (C) 2009, 2010 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.QC
    ( testUtils
    , testPeerMap
    , testContainer
    , testInstance
    , testNode
    , testText
    , testOpCodes
    , testJobs
    , testCluster
    , testLoader
    ) where

import Test.QuickCheck
import Test.QuickCheck.Batch
import Data.List (findIndex, intercalate, nub)
import Data.Maybe
import Control.Monad
import qualified Text.JSON as J
import qualified Data.Map
import qualified Data.IntMap as IntMap
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Jobs as Jobs
import qualified Ganeti.Luxi
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.PeerMap as PeerMap
import qualified Ganeti.HTools.Rapi
import qualified Ganeti.HTools.Simu
import qualified Ganeti.HTools.Text as Text
import qualified Ganeti.HTools.Types as Types
import qualified Ganeti.HTools.Utils as Utils
import qualified Ganeti.HTools.Version

-- * Constants

-- | Maximum memory (1TiB, somewhat random value)
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value)
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)
maxCpu :: Int
maxCpu = 1024

-- * Helper functions

-- | Simple checker for whether OpResult is fail or pass
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Update an instance to be smaller than a node
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] (-1) (-1)

-- | Create a small cluster by repeating a node spec
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
    in nlst

-- | Checks if a node is "big" enough
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu

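-- | Check whether the balancing algorithm can find at least one
-- improving move for the given cluster table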
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac = isJust $ Cluster.tryBalance tbl dm evac 0 0

-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
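
-- | Example composition of the helpers above (an illustrative sketch
-- only; it is not referenced by any test below): build a two-node
-- cluster from a node spec, shrink the instance to fit the first
-- node, and place it with node 0 as primary and node 1 as secondary.
exampleAssign :: Node.Node -> Instance.Instance -> (Node.List, Instance.List)
exampleAssign node inst =
    let nl = makeSmallCluster node 2
        inst' = setInstanceSmallerThanNode (Container.find 0 nl) inst
    in assignInstance nl Container.empty inst' 0 1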

-- * Arbitrary instances

-- copied from the introduction to quickcheck
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)

getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)

getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')
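
-- The generators above can be exercised by hand; an illustrative GHCi
-- session (assuming QuickCheck 1's 'generate :: Int -> StdGen -> Gen a
-- -> a' and 'newStdGen' from System.Random; actual output depends on
-- the random seed):
--
-- > rng <- newStdGen
-- > generate 30 rng getFQDN
-- "x3b.q-7z.mb"    (illustrative value only)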

-- let's generate a random instance
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements ["ERROR_up", "ERROR_down", "ADMIN_down"
                         , "ERROR_nodedown", "ERROR_nodeoffline"
                         , "running"
                         , "no_such_status1", "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] pn sn

-- and a random node
instance Arbitrary Node.Node where
    arbitrary = do
      name <- getFQDN
      mem_t <- choose (0, maxMem)
      mem_f <- choose (0, mem_t)
      mem_n <- choose (0, mem_t - mem_f)
      dsk_t <- choose (0, maxDsk)
      dsk_f <- choose (0, dsk_t)
      cpu_t <- choose (0, maxCpu)
      offl <- arbitrary
      let n = Node.create name (fromIntegral mem_t) mem_n mem_f
              (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
              Utils.defaultGroupID
          n' = Node.buildPeers n Container.empty
      return n'

-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpFailoverInstance arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM3 OpCodes.OpMigrateInstance arbitrary arbitrary arbitrary
        _ -> fail "Wrong opcode")

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- * Actual tests

-- If the list is not just a single empty string, and the elements do
-- not contain commas, then join+split should be idempotent
prop_Utils_commaJoinSplit lst = lst /= [""] &&
                                all (not . elem ',') lst ==>
                                Utils.sepSplit ',' (Utils.commaJoin lst) == lst
-- Split and join should always be idempotent
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  ]

-- | Make sure add is idempotent
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]

-- Container tests

prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromAssocList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]

-- Simple instance tests, we only have setters/getters

prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx

prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `elem` Instance.runningStates ==> run_st

prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailMem

prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailCPU

prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailDisk

prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]

-- Instance text loader tests

prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, snode, tags]::
               Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, pnode, tags]::
               Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            (Instance.name i == name &&
             Instance.vcpus i == vcpus &&
             Instance.mem i == mem &&
             Instance.pNode i == pdx &&
             Instance.sNode i == (if null snode
                                  then Node.noSecondary
                                  else rsdx) &&
             isNothing fail1)

prop_Text_Load_InstanceFail ktn fields =
    length fields /= 8 ==> isNothing $ Text.loadInst nl fields
    where nl = Data.Map.fromList ktn

prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
    in case Text.loadNode [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode fields

prop_Text_NodeLSIdempotent node =
    (Text.loadNode .
         Utils.sepSplit '|' . Text.serializeNode) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }

testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
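
-- For reference, the node text format exercised above is one
-- '|'-separated line per node, carrying the eight fields that
-- Text.loadNode expects ("Y"/"N" encodes the offline flag); a
-- hypothetical example line:
--
--   node1.example.com|1024|128|896|102400|102400|4|N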

-- Node tests

prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailCPU -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Check mdsk setting
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField

prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     if null nodes then True else not (null ng)

testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]

-- Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous cluster
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromAssocList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-15

-- | Check that cluster stats are sane
prop_CStats_sane node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromAssocList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

-- | Check that one instance is allocated correctly, without
-- rebalances needed
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        rqnodes = 2
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.tryAlloc nl il inst' rqnodes of
         Types.Bad _ -> False
         Types.Ok (_, _, sols3) ->
             case sols3 of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True False)
               _ -> False

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
    in case Cluster.tieredAlloc nl il inst rqnodes [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes) -> not (null ixes) &&
                                      IntMap.size il' == length ixes

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        rqnodes = 2
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.tryAlloc nl il inst' rqnodes of
         Types.Bad _ -> False
         Types.Ok (_, _, sols3) ->
             case sols3 of
               [] -> False
               (xnl, xi, _, _):[] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            && not (isNodeBig node 8)
            ==>
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        rqnodes = 2
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case Cluster.iterateAlloc nl' il i_templ rqnodes [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   in canBalance tbl True False

-- | Check that instances with primary and secondary nodes in
-- different node groups are reported as inconsistent (split)
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      node3' = node3 { Node.group = "other-uuid" }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromAssocList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni

testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

-- | Check that opcode serialization is idempotent
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

testOpCodes =
  [ run prop_OpCodes_serialization
  ]

-- | Check that (queued) job\/opcode status serialization is idempotent
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

-- | Loader tests

prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti

prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  (if not (null nodes)
   then maximum (IntMap.keys kt) == length nodes - 1
   else True)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Checks that the number of primary instances recorded on the nodes
-- is zero
prop_Loader_mergeData ns =
  let na = Container.fromAssocList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] (na, Container.empty, []) of
    Types.Bad _ -> False
    Types.Ok (nl, il, _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]
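
-- The exported lists are meant for a batch driver; a minimal sketch
-- of such a driver, using 'runTests' and the default options from
-- Test.QuickCheck.Batch (the real htools test runner is a separate
-- program):
--
-- main :: IO ()
-- main = mapM_ (\(name, tests) -> runTests name defOpt tests)
--          [ ("Utils", testUtils)
--          , ("PeerMap", testPeerMap)
--          , ("Cluster", testCluster)
--          ]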