{-| Unittests for ganeti-htools

-}

{-

Copyright (C) 2009, 2010, 2011 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.QC
    ( testUtils
    , testPeerMap
    , testContainer
    , testInstance
    , testNode
    , testText
    , testOpCodes
    , testJobs
    , testCluster
    , testLoader
    ) where

import Test.QuickCheck
import Data.List (findIndex, intercalate, nub, isPrefixOf)
import Data.Maybe
import Control.Monad
import qualified Text.JSON as J
import qualified Data.Map
import qualified Data.IntMap as IntMap
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Jobs as Jobs
import qualified Ganeti.Luxi
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.PeerMap as PeerMap
import qualified Ganeti.HTools.Rapi
import qualified Ganeti.HTools.Simu
import qualified Ganeti.HTools.Text as Text
import qualified Ganeti.HTools.Types as Types
import qualified Ganeti.HTools.Utils as Utils
import qualified Ganeti.HTools.Version
import qualified Ganeti.Constants as C

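-- | Run a property with the given QuickCheck arguments and return the result.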
run :: Testable prop => prop -> Args -> IO Result
run = flip quickCheckWithResult

-- * Constants

-- | Maximum memory (1TiB, somewhat random value)
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value)
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)
maxCpu :: Int
maxCpu = 1024

defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
               Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred

defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- * Helper functions

-- | Simple checker for whether OpResult is fail or pass
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Update an instance to be smaller than a node
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)

-- | Create a small cluster by repeating a node spec
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
    in nlst

-- | Checks if a node is "big" enough
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu

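-- | Check whether a balancing move is possible for the given table
-- and move options.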
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0

-- | Assigns a new instance to a cluster; this is not allocation, so
-- no resource checks are done
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')

-- * Arbitrary instances

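-- | A single character from the set used to build DNS-like names.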
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)

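-- | Generate a name of between 1 and 64 DNS-like characters.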
getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)

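-- | Generate a dot-separated, FQDN-like name.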
getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')

-- let's generate a random instance
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn

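-- | Generate a node, optionally bounding its total resources between
-- the given multiples of the standard unit amounts.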
genNode :: Maybe Int -> Maybe Int -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let (base_mem, base_dsk, base_cpu) =
          case min_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (0, 0, 0)
      (top_mem, top_dsk, top_cpu)  =
          case max_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (maxMem, maxDsk, maxCpu)
  name  <- getFQDN
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers n Container.empty

-- and a random node
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing

-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

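-- | Generate one of a few selected opcodes, with arbitrary fields.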
instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
          arbitrary
        _ -> fail "Wrong opcode")

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- * Actual tests

-- If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent
prop_Utils_commaJoinSplit lst = lst /= [""] &&
                                all (not . elem ',') lst ==>
                                Utils.sepSplit ',' (Utils.commaJoin lst) == lst
-- Split and join should always be idempotent
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | Test fromObjWithDefault, using the Maybe monad and an integer
-- value
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key will be returned with the default
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- a found key will be returned as is, not with the default
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
         random_key (def_value+1) == Just def_value
        where _types = def_value :: Integer

testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  ]

-- | Make sure add is idempotent
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum element, or 0 for an
-- empty map
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]

-- Container tests

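-- | Check that adding two items is insensitive to their order and is
-- idempotent.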
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

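-- | Check that nameOf returns the node's own name.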
prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]

-- Simple instance tests; we only have setters/getters

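-- | Check that an instance's name and alias are equal right after creation.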
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx

prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `elem` Instance.runningStates ==> run_st

prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem}
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk

prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]

-- Instance text loader tests

prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx autobal =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , pnode::String, snode::String
                 , pdx::Types.Ndx, sdx::Types.Ndx
                 , autobal::Bool)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else rsdx) &&
            Instance.auto_balance i == autobal &&
            isNothing fail1

prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> False
      Types.Bad msg -> "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn

prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }

testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]

-- Node tests

prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
        case Node.addPri node inst'' of
          Types.OpFail Types.FailCPU -> True
          _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

-- | Checks for memory reservation changes
prop_Node_rMem node inst =
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.auto_balance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.auto_balance = False }
        -- now we have the two instances, identical except the
        -- auto_balance attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)
         _ -> False

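-- | A small ratio in the [0, 1] range, used for the mdsk test below.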
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Check mdsk setting
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

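-- | Check that all default node fields have a valid header and can be
-- displayed for a node.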
prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField

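-- | Check that computeGroups preserves the total node count, that all
-- nodes in a returned group belong to that group, and that groups are
-- not duplicated.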
prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))

testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]


-- Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous cluster
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-12

-- | Check that cluster stats are sane
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

-- | Check that one instance is allocated correctly, without
-- rebalances needed
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True True False)
               _ -> False

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
        Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                      IntMap.size il' == length ixes &&
                                      length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, _):[] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
        Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   in canBalance tbl True True False

-- | Check that instances split between two node groups (primary and
-- secondary in different groups) are detected as inconsistent
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni

testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

-- | Check that opcode serialization round-trips correctly
prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

testOpCodes =
  [ run prop_OpCodes_serialization
  ]

-- | Check that (queued) job\/opcode status serialization round-trips correctly
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

-- | Loader tests

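-- | Check that lookupNode behaves like a plain map lookup.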
prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti

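-- | Check that assignIndices builds a name association and a container
-- of the right size, with indices running from 0 to n-1.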
prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  (if not (null nodes)
   then maximum (IntMap.keys kt) == length nodes - 1
   else True)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)


-- | Checks that the number of primary instances recorded on the nodes
-- is zero
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]