root / htools / Ganeti / HTools / QC.hs @ 9cbc1edb


{-| Unittests for ganeti-htools

-}

{-

Copyright (C) 2009, 2010, 2011 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.QC
    ( testUtils
    , testPeerMap
    , testContainer
    , testInstance
    , testNode
    , testText
    , testOpCodes
    , testJobs
    , testCluster
    , testLoader
    ) where

import Test.QuickCheck
import Test.QuickCheck.Batch
import Data.List (findIndex, intercalate, nub, isPrefixOf)
import Data.Maybe
import Control.Monad
import qualified Text.JSON as J
import qualified Data.Map
import qualified Data.IntMap as IntMap
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Jobs as Jobs
import qualified Ganeti.Luxi
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.PeerMap as PeerMap
import qualified Ganeti.HTools.Rapi
import qualified Ganeti.HTools.Simu
import qualified Ganeti.HTools.Text as Text
import qualified Ganeti.HTools.Types as Types
import qualified Ganeti.HTools.Utils as Utils
import qualified Ganeti.HTools.Version

-- * Constants

-- | Maximum memory (1TiB, somewhat random value)
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value)
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)
maxCpu :: Int
maxCpu = 1024

defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
               Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred

defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- * Helper functions

-- | Simple checker for whether OpResult is fail or pass
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Update an instance to be smaller than a node
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)

-- | Create a small cluster by repeating a node spec
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
    in nlst

-- | Checks if a node is "big" enough
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu

canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac = isJust $ Cluster.tryBalance tbl dm evac 0 0

-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
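
-- Illustrative sketch (not part of the original test suite): a GHCi-style
-- walkthrough of how the helpers above compose.  The concrete sizes are
-- hypothetical; only the helper names come from this module, and the node
-- would typically come from the Arbitrary instance defined below.
--
-- > let nl   = makeSmallCluster node 3                  -- three identical nodes
-- > let inst = setInstanceSmallerThanNode node
-- >                (createInstance 1024 10240 1)        -- guaranteed to fit
-- > assignInstance nl Container.empty inst 0 1          -- place it on nodes 0/1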

-- * Arbitrary instances

-- copied from the introduction to quickcheck
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)

getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)


getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')

-- let's generate a random instance
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements ["ERROR_up", "ERROR_down", "ADMIN_down"
                         , "ERROR_nodedown", "ERROR_nodeoffline"
                         , "running"
                         , "no_such_status1", "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn

-- and a random node
instance Arbitrary Node.Node where
    arbitrary = do
      name <- getFQDN
      mem_t <- choose (0, maxMem)
      mem_f <- choose (0, mem_t)
      mem_n <- choose (0, mem_t - mem_f)
      dsk_t <- choose (0, maxDsk)
      dsk_f <- choose (0, dsk_t)
      cpu_t <- choose (0, maxCpu)
      offl <- arbitrary
      let n = Node.create name (fromIntegral mem_t) mem_n mem_f
              (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
              0
          n' = Node.buildPeers n Container.empty
      return n'
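
-- Illustrative sketch (not part of the original file): with the Arbitrary
-- instances above in scope, properties over whole nodes and instances need
-- no explicit generators.  The property below is hypothetical and merely
-- restates the definition of setInstanceSmallerThanNode:
--
-- > prop_example :: Node.Node -> Instance.Instance -> Bool
-- > prop_example node inst =
-- >     Instance.mem (setInstanceSmallerThanNode node inst) ==
-- >     Node.availMem node `div` 2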

-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
          arbitrary
        _ -> fail "Wrong opcode")

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- * Actual tests

-- If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent
prop_Utils_commaJoinSplit lst = lst /= [""] &&
                                all (not . elem ',') lst ==>
                                Utils.sepSplit ',' (Utils.commaJoin lst) == lst
-- Split and join should always be idempotent
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | fromObjWithDefault, we test using the Maybe monad and an integer
-- value
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key will be returned with the default
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- a found key will be returned as is, not with default
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
         random_key (def_value+1) == Just def_value
        where _types = (def_value :: Integer)

testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  ]
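
-- Illustrative driver sketch (an assumption, not part of this module): the
-- exported lists such as testUtils are meant to be handed to a small main
-- program built on Test.QuickCheck.Batch, roughly along these lines, with
-- the test options left at their defaults:
--
-- > import Test.QuickCheck.Batch (defOpt, runTests)
-- > import qualified Ganeti.HTools.QC as QC
-- >
-- > main :: IO ()
-- > main = do
-- >   runTests "Utils"   defOpt QC.testUtils
-- >   runTests "PeerMap" defOpt QC.testPeerMap
-- >   runTests "Cluster" defOpt QC.testCluster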

-- | Make sure add is idempotent
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem indeed returns the maximum, or 0 for null
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]

-- Container tests

prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]

-- Simple instance tests; we only have setters/getters

prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx

prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `elem` Instance.runningStates ==> run_st

prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailMem

prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailCPU

prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
        Types.isBad $ Instance.shrinkByType inst Types.FailDisk

prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]

-- Instance text loader tests

prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx autobal =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx
                 , autobal::Bool)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            (Instance.name i == name &&
             Instance.vcpus i == vcpus &&
             Instance.mem i == mem &&
             Instance.pNode i == pdx &&
             Instance.sNode i == (if null snode
                                  then Node.noSecondary
                                  else rsdx) &&
             Instance.auto_balance i == autobal &&
             isNothing fail1)

prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Right _ -> False
      Left msg -> isPrefixOf "Invalid/incomplete instance data: '" msg
    where nl = Data.Map.fromList ktn

prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }

testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]
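
-- Illustrative note (not part of the original file): the Text loader
-- properties above hand the loaders pipe-separated fields in the order
-- shown in their argument lists, so a hypothetical serialized node line
-- would look roughly like
--
-- >   node1.example.com|16384|1024|8192|409600|306400|16|N|<group-uuid>
--
-- and an instance line roughly like
--
-- >   inst1.example.com|1024|10240|1|running|Y|node1.example.com|node2.example.com|
--
-- Both lines are made up; only the field order is taken from the
-- properties above.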

-- Node tests

prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

prop_Node_addPriFC node inst = Instance.vcpus inst > Node.availCpu node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailCPU -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

-- | Checks for memory reservation changes
prop_Node_rMem node inst =
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = (-1), Instance.auto_balance = True}
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.auto_balance = False }
        -- now we have the two instances, identical except the
        -- auto_balance attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)
         _ -> False

newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Check mdsk setting
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField


prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     if null nodes then True else not (null ng)

testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]

-- Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous cluster
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-15

-- | Check that cluster stats are sane
prop_CStats_sane node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

-- | Check that one instance is allocated correctly, without
-- rebalances needed
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True False)
               _ -> False

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
        Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                      IntMap.size il' == length ixes &&
                                      length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, _):[] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            && not (isNodeBig node 8)
            ==>
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
        Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   in canBalance tbl True False

-- | Checks consistency
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni

testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

-- | Check that opcode serialization is idempotent

prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

testOpCodes =
  [ run prop_OpCodes_serialization
  ]
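
-- Illustrative sketch (not part of the original file): the round-trip
-- pattern used here and for the job/opcode status tests below generalizes
-- to any type with Eq and JSON instances; a hypothetical shared helper
-- could read:
--
-- > prop_JSON_roundtrip :: (Eq a, J.JSON a) => a -> Bool
-- > prop_JSON_roundtrip x = case J.readJSON (J.showJSON x) of
-- >                           J.Ok x'   -> x' == x
-- >                           J.Error _ -> False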

-- | Check that (queued) job\/opcode status serialization is idempotent
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

-- | Loader tests

prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti

prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  (if not (null nodes)
   then maximum (IntMap.keys kt) == length nodes - 1
   else True)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)


-- | Checks that the number of primary instances recorded on the nodes
-- is zero
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]