Statistics
| Branch: | Tag: | Revision:

root / Ganeti / HTools / QC.hs @ 41b5c85a

History | View | Annotate | Download (30 kB)

1
{-| Unittests for ganeti-htools
2

    
3
-}
4

    
5
{-
6

    
7
Copyright (C) 2009, 2010, 2011 Google Inc.
8

    
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
13

    
14
This program is distributed in the hope that it will be useful, but
15
WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
General Public License for more details.
18

    
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22
02110-1301, USA.
23

    
24
-}
25

    
26
module Ganeti.HTools.QC
27
    ( testUtils
28
    , testPeerMap
29
    , testContainer
30
    , testInstance
31
    , testNode
32
    , testText
33
    , testOpCodes
34
    , testJobs
35
    , testCluster
36
    , testLoader
37
    ) where
38

    
39
import Test.QuickCheck
40
import Test.QuickCheck.Batch
41
import Data.List (findIndex, intercalate, nub)
42
import Data.Maybe
43
import Control.Monad
44
import qualified Text.JSON as J
45
import qualified Data.Map
46
import qualified Data.IntMap as IntMap
47
import qualified Ganeti.OpCodes as OpCodes
48
import qualified Ganeti.Jobs as Jobs
49
import qualified Ganeti.Luxi
50
import qualified Ganeti.HTools.CLI as CLI
51
import qualified Ganeti.HTools.Cluster as Cluster
52
import qualified Ganeti.HTools.Container as Container
53
import qualified Ganeti.HTools.ExtLoader
54
import qualified Ganeti.HTools.IAlloc as IAlloc
55
import qualified Ganeti.HTools.Instance as Instance
56
import qualified Ganeti.HTools.Loader as Loader
57
import qualified Ganeti.HTools.Luxi
58
import qualified Ganeti.HTools.Node as Node
59
import qualified Ganeti.HTools.Group as Group
60
import qualified Ganeti.HTools.PeerMap as PeerMap
61
import qualified Ganeti.HTools.Rapi
62
import qualified Ganeti.HTools.Simu
63
import qualified Ganeti.HTools.Text as Text
64
import qualified Ganeti.HTools.Types as Types
65
import qualified Ganeti.HTools.Utils as Utils
66
import qualified Ganeti.HTools.Version
67

    
68
-- * Constants

-- | Maximum memory (1TiB, somewhat random value)
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value)
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)
maxCpu :: Int
maxCpu = 1024

-- | The default node group used throughout the tests, with index 0.
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
               Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred

-- | A group list containing only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Association from the default group's UUID to its index, as
-- required by the text loader functions.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
92

    
93
-- * Helper functions

-- | Simple checker for whether OpResult is fail or pass
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Update an instance to be smaller than a node: each resource is
-- set to half of the node's available amount, so the instance is
-- guaranteed to fit.
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec (memory, disk, vcpus); name,
-- status and node indices are fixed placeholder values.
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] (-1) (-1)

-- | Create a small cluster by repeating a node spec 'count' times.
-- Note that all replicas share the same name; indices are assigned
-- via 'Loader.assignIndices'.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
    in nlst

-- | Checks if a node is "big" enough, i.e. has more than 'size'
-- standard allocation units free of each resource.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu

-- | Whether 'Cluster.tryBalance' can find at least one improving
-- move for the given table (with the given disk-move/evacuation
-- settings and zero thresholds).
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac = isJust $ Cluster.tryBalance tbl dm evac 0 0

-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done. The instance gets the
-- next free index and is recorded on both the primary and secondary
-- node.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      -- next free instance index: one past the current maximum
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
146

    
147
-- * Arbitrary instances

-- printable ASCII characters; copied from the introduction to
-- quickcheck
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

-- | A single character valid in a DNS name.
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)

-- | Generates a single DNS name component (1-64 DNS characters).
getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)


-- | Generates a fully-qualified name: a leading component plus 1-4
-- dot-separated further components.
-- NOTE(review): the components after the first are generated via
-- @vector ncomps@, so each is a fixed-size list of 'DNSChar';
-- presumably single-character components are intended here — confirm.
getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')

-- let's generate a random instance; the run state list deliberately
-- mixes valid and invalid status strings
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements ["ERROR_up", "ERROR_down", "ADMIN_down"
                         , "ERROR_nodedown", "ERROR_nodeoffline"
                         , "running"
                         , "no_such_status1", "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] pn sn

-- and a random node; free/node memory are constrained so that
-- free + node-reserved never exceeds total
instance Arbitrary Node.Node where
    arbitrary = do
      name <- getFQDN
      mem_t <- choose (0, maxMem)
      mem_f <- choose (0, mem_t)
      mem_n <- choose (0, mem_t - mem_f)
      dsk_t <- choose (0, maxDsk)
      dsk_f <- choose (0, dsk_t)
      cpu_t <- choose (0, maxCpu)
      offl <- arbitrary
      let n = Node.create name (fromIntegral mem_t) mem_n mem_f
              (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
              0
          n' = Node.buildPeers n Container.empty
      return n'

-- replace disks: any of the four replacement modes
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

-- | Random opcodes, limited to the four types the serialization
-- tests care about; the catch-all branch is unreachable by
-- construction.
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpFailoverInstance arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM3 OpCodes.OpMigrateInstance arbitrary arbitrary arbitrary
        _ -> fail "Wrong opcode")

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]
238

    
239
-- * Actual tests

-- | If the list is not just the singleton empty string and none of
-- its elements contain a comma, then joining on commas and splitting
-- again must reproduce the original list (join+split is idempotent
-- on such input).
prop_Utils_commaJoinSplit lst =
    lst /= [""] && all (notElem ',') lst ==>
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- | Splitting on commas and then joining must always reproduce the
-- original string.
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | The list of Utils module tests.
testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  ]
253

    
254
-- | Make sure add is idempotent (adding the same key/element twice
-- equals adding it once); tested on a normalized ('accumArray') map.
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

-- | The list of PeerMap tests.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]
296

    
297
-- Container tests

-- | addTwo (with both pairs mapping a key to itself) is commutative
-- in its two arguments and idempotent.
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

-- | Looking up a node's index yields its name.
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  -- precondition: all generated names/aliases are pairwise distinct
  -- and 'othername' is none of them
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      -- overwrite each node's name/alias with the generated pair
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

-- | The list of Container tests.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]
339

    
340
-- Simple instance tests, we only have setter/getters

-- | A freshly created instance has its alias equal to its name.
-- (sic: "creat" — the name is kept for compatibility with the
-- 'testInstance' list below)
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

-- | setIdx is reflected by the idx getter.
prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

-- | setAlias updates only the alias, leaving the name unchanged.
prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

-- | setPri is reflected by the pNode getter.
prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

-- | setSec is reflected by the sNode getter.
prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

-- | setBoth updates primary and secondary at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx

-- | An instance whose textual run state is a known running state
-- must be flagged as running.
prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `elem` Instance.runningStates ==> run_st

-- | Conversely, an unknown/non-running state must not be flagged as
-- running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

-- | Shrinking memory succeeds (by exactly one unit) when at least
-- two units are present...
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

-- | ...and fails when fewer than two units are present.
prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailMem

-- | Same shrink success property for vcpus.
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

-- | Same shrink failure property for vcpus.
prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailCPU

-- | Same shrink success property for disk.
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

-- | Same shrink failure property for disk.
prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailDisk

-- | setMovable is reflected by the movable getter.
prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

-- | The list of Instance tests.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]
441

    
442
-- Instance text loader tests

-- | Round-trip check for the instance text loader: a well-formed
-- 8-field line parses and yields the expected fields, while a line
-- with identical primary and secondary nodes must be rejected.
prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- ensure primary and secondary indices differ
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, snode, tags]::
               Maybe (String, Instance.Instance)
        -- same primary and secondary node: must fail to load
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, pnode, tags]::
               Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            (Instance.name i == name &&
             Instance.vcpus i == vcpus &&
             Instance.mem i == mem &&
             Instance.pNode i == pdx &&
             Instance.sNode i == (if null snode
                                  then Node.noSecondary
                                  else rsdx) &&
             isNothing fail1)

-- | Any field list whose length is not 8 must fail to parse as an
-- instance.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 8 ==> isNothing $ Text.loadInst nl fields
    where nl = Data.Map.fromList ktn

-- | Round-trip check for the node text loader: negative values are
-- serialized as "?" (unknown) and must mark the node offline, as
-- does the explicit offline flag.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

-- | Any field list whose length is not 8 must fail to parse as a node.
-- NOTE(review): the success test above passes 9 fields (including the
-- group uuid) to 'Text.loadNode'; confirm whether loadNode expects 8
-- or 9 fields — if 9, this precondition should be @/= 9@.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

-- | serialize/load round-trips a node exactly.
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }

-- | The list of Text loader tests.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
531

    
532
-- Node tests

-- | setAlias updates only the alias, leaving the name unchanged.
prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

-- | setOffline is reflected by the offline getter.
prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

-- | setXmem is reflected by the xMem getter.
prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

-- | setMcpu is reflected by the mCpu getter.
prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          -- shrink everything but memory, so only memory can fail
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | As above, but for disk (only disk left over-sized).
prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | As above, but for vcpus (only vcpus left over-sized).
prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailCPU -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }

-- | Check that adding a secondary instance with too high memory or
-- disk will be rejected
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

-- | A ratio in the interval [0, 1], for mdsk tests.
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Check mdsk setting: the derived low-disk watermark and available
-- disk stay within sane bounds, and the ratio itself is stored.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps

-- | Adding then deleting the same tags gives back the empty map.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

-- | At least one already-present tag must be rejected on re-add.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

-- | Every default field has a known header and a known value.
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField
623

    
624

    
625
-- | Check that 'Node.computeGroups' is a partition of its input:
-- every node is counted exactly once, each group is homogeneous
-- (all members carry the group's uuid), group uuids are unique, and
-- a non-empty input yields a non-empty grouping.
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     -- replaces the redundant 'if null nodes then True else ...'
     (null nodes || not (null ng))
632

    
633
-- | The list of Node tests.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]
648

    
649

    
650
-- Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous cluster
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-15

-- | Check that cluster stats are sane: allocatable disk is
-- non-negative and never exceeds free disk
prop_CStats_sane node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

-- | Check that one instance is allocated correctly, without
-- rebalances needed
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes nl 2 >>= Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             -- exactly one solution is expected, and the resulting
             -- cluster must not be improvable by balancing
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True False)
               _ -> False

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes nl rqnodes
    in case allocnodes >>= \allocnodes' ->
        Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) ->
             -- some instances were placed, and the bookkeeping lists
             -- are consistent with the final instance map
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes nl 2 >>= Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, _):[] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            && not (isNodeBig node 8)
            ==>
    let nl = makeSmallCluster node count
        -- reserve one node, fill the rest, then add it back empty
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes nl' 2
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
        Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   in canBalance tbl True False

-- | Checks consistency: instances spanning node groups are detected
-- as split, instances within one group are not
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      -- move the third node into a different group
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni

-- | The list of Cluster tests.
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]
800

    
801
-- | Check that opcode serialization is idempotent

prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

-- | The list of OpCodes tests.
testOpCodes =
  [ run prop_OpCodes_serialization
  ]
812

    
813
-- | Check that (queued) job\/opcode status serialization is idempotent
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

-- | Same round-trip property for job status.
prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

-- | The list of Jobs tests.
testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]
830

    
831
-- | Loader tests

-- | lookupNode agrees with a plain Data.Map lookup.
prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

-- | lookupInstance agrees with a plain Data.Map lookup.
prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti
840

    
841
-- | Check that assigning indices preserves the node count (in both
-- the name association and the container) and that indices are
-- contiguous from 0, i.e. the maximum key is @length nodes - 1@.
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  -- short-circuit replaces the redundant 'if not (null nodes) ...'
  -- and guards the partial 'maximum' on the empty case
  (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
848

    
849

    
850
-- | Checks that the number of primary instances recorded on the nodes
-- is zero
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      -- with no instance input, no node may list a primary instance
      -- and the instance list must be empty
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

-- | The list of Loader tests.
testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]
  ]