{-| Unittests for ganeti-htools

-}

{-

Copyright (C) 2009, 2010, 2011 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.QC
    ( testUtils
    , testPeerMap
    , testContainer
    , testInstance
    , testNode
    , testText
    , testOpCodes
    , testJobs
    , testCluster
    , testLoader
    ) where

import Test.QuickCheck
import Data.List (findIndex, intercalate, nub, isPrefixOf)
import Data.Maybe
import Control.Monad
import qualified Text.JSON as J
import qualified Data.Map
import qualified Data.IntMap as IntMap
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Jobs as Jobs
import qualified Ganeti.Luxi
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.ExtLoader
import qualified Ganeti.HTools.IAlloc as IAlloc
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.Loader as Loader
import qualified Ganeti.HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.PeerMap as PeerMap
import qualified Ganeti.HTools.Rapi
import qualified Ganeti.HTools.Simu
import qualified Ganeti.HTools.Text as Text
import qualified Ganeti.HTools.Types as Types
import qualified Ganeti.HTools.Utils as Utils
import qualified Ganeti.HTools.Version
import qualified Ganeti.Constants as C

run :: Testable prop => prop -> Args -> IO Result
run = flip quickCheckWithResult
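
-- Each test list defined below (e.g. 'testUtils') is therefore a list of
-- partially applied 'run' calls, i.e. values of type @Args -> IO Result@.
-- A minimal usage sketch (illustrative only, using QuickCheck's 'stdArgs'):
--
-- > mapM_ ($ stdArgs) testUtils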

-- * Constants

-- | Maximum memory (1TiB, somewhat random value)
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk (8TiB, somewhat random value)
maxDsk :: Int
maxDsk = 1024 * 1024 * 8

-- | Max CPUs (1024, somewhat random value)
maxCpu :: Int
maxCpu = 1024

defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
               Group.create "default" Utils.defaultGroupID
                    Types.AllocPreferred

defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

-- * Helper functions

-- | Simple checker for whether OpResult is fail or pass
isFailure :: Types.OpResult a -> Bool
isFailure (Types.OpFail _) = True
isFailure _ = False

-- | Update an instance to be smaller than a node
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

-- | Create an instance given its spec
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)

-- | Create a small cluster by repeating a node spec
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let fn = Node.buildPeers node Container.empty
        namelst = map (\n -> (Node.name n, n)) (replicate count fn)
        (_, nlst) = Loader.assignIndices namelst
    in nlst
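
-- Illustrative usage, in the same style as the cluster properties further
-- below ('genNode' is the node generator defined later in this file):
--
-- > forAll (genNode (Just 4) Nothing) $ \node ->
-- >     let nl = makeSmallCluster node 3   -- three identical copies of node
-- >     in Container.size nl == 3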

-- | Checks if a node is "big" enough
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                      && Node.availMem node > size * Types.unitMem
                      && Node.availCpu node > size * Types.unitCpu

canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0

-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
  let pnode = Container.find pdx nl
      snode = Container.find sdx nl
      maxiidx = if Container.null il
                then 0
                else fst (Container.findMax il) + 1
      inst' = inst { Instance.idx = maxiidx,
                     Instance.pNode = pdx, Instance.sNode = sdx }
      pnode' = Node.setPri pnode inst'
      snode' = Node.setSec snode inst'
      nl' = Container.addTwo pdx pnode' sdx snode' nl
      il' = Container.add maxiidx inst' il
  in (nl', il')
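
-- For example, 'prop_ClusterSplitCluster' below repeatedly calls
-- @assignInstance nl il inst 0 1@ to pin many copies of an instance onto
-- node 0 (primary) and node 1 (secondary), without any capacity checks
-- getting in the way.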

-- * Arbitrary instances

newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
    arbitrary = do
      x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar x)

getName :: Gen String
getName = do
  n <- choose (1, 64)
  dn <- vector n::Gen [DNSChar]
  return (map dnsGetChar dn)


getFQDN :: Gen String
getFQDN = do
  felem <- getName
  ncomps <- choose (1, 4)
  frest <- vector ncomps::Gen [[DNSChar]]
  let frest' = map (map dnsGetChar) frest
  return (felem ++ "." ++ intercalate "." frest')
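
-- So the generated names look like, e.g., @"d4x-7.q0._a.xyz"@ (illustrative
-- value only): a label of 1-64 DNS-ish characters followed by one to four
-- dot-separated, possibly empty, components.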

-- let's generate a random instance
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements [ C.inststErrorup
                         , C.inststErrordown
                         , C.inststAdmindown
                         , C.inststNodedown
                         , C.inststNodeoffline
                         , C.inststRunning
                         , "no_such_status1"
                         , "no_such_status2"]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] True pn sn

genNode :: Maybe Int -> Maybe Int -> Gen Node.Node
genNode min_multiplier max_multiplier = do
  let (base_mem, base_dsk, base_cpu) =
          case min_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (0, 0, 0)
      (top_mem, top_dsk, top_cpu)  =
          case max_multiplier of
            Just mm -> (mm * Types.unitMem,
                        mm * Types.unitDsk,
                        mm * Types.unitCpu)
            Nothing -> (maxMem, maxDsk, maxCpu)
  name  <- getFQDN
  mem_t <- choose (base_mem, top_mem)
  mem_f <- choose (base_mem, mem_t)
  mem_n <- choose (0, mem_t - mem_f)
  dsk_t <- choose (base_dsk, top_dsk)
  dsk_f <- choose (base_dsk, dsk_t)
  cpu_t <- choose (base_cpu, top_cpu)
  offl  <- arbitrary
  let n = Node.create name (fromIntegral mem_t) mem_n mem_f
          (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl 0
  return $ Node.buildPeers n Container.empty
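
-- For example, @genNode (Just 5) (Just 128)@ (as used by
-- 'prop_ClusterAllocBalance' below) only yields nodes whose total memory,
-- disk and CPU lie between 5 and 128 times the respective
-- @Types.unitMem@/@unitDsk@/@unitCpu@ values, so every generated node
-- should be able to hold at least a few standard-sized instances.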

-- and a random node
instance Arbitrary Node.Node where
    arbitrary = genNode Nothing Nothing

-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
  arbitrary = elements [ OpCodes.ReplaceOnPrimary
                       , OpCodes.ReplaceOnSecondary
                       , OpCodes.ReplaceNewSecondary
                       , OpCodes.ReplaceAuto
                       ]

instance Arbitrary OpCodes.OpCode where
  arbitrary = do
    op_id <- elements [ "OP_TEST_DELAY"
                      , "OP_INSTANCE_REPLACE_DISKS"
                      , "OP_INSTANCE_FAILOVER"
                      , "OP_INSTANCE_MIGRATE"
                      ]
    (case op_id of
        "OP_TEST_DELAY" ->
          liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
          liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
          arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
          liftM2 OpCodes.OpInstanceFailover arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
          liftM4 OpCodes.OpInstanceMigrate arbitrary arbitrary arbitrary
          arbitrary
        _ -> fail "Wrong opcode")

instance Arbitrary Jobs.OpStatus where
  arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
  arbitrary = elements [minBound..maxBound]

-- * Actual tests

-- If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent
prop_Utils_commaJoinSplit =
    forAll (arbitrary `suchThat`
            (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
    Utils.sepSplit ',' (Utils.commaJoin lst) == lst
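
-- A concrete illustration of the round-trip tested above (assuming the
-- usual 'intercalate'-style joining):
--
-- > Utils.commaJoin ["a", "b"]   -- ==> "a,b"
-- > Utils.sepSplit ',' "a,b"     -- ==> ["a", "b"]
--
-- The @[""]@ and embedded-comma cases are excluded because joining them
-- loses the information needed to split the string back unambiguously.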

-- Split and join should always be idempotent
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | Test 'Utils.fromObjWithDefault' using the Maybe monad and an integer
-- value
prop_Utils_fromObjWithDefault def_value random_key =
    -- a missing key will be returned with the default
    Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
    -- a found key will be returned as is, not with default
    Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
         random_key (def_value+1) == Just def_value
        where _types = def_value :: Integer

testUtils =
  [ run prop_Utils_commaJoinSplit
  , run prop_Utils_commaSplitJoin
  , run prop_Utils_fromObjWithDefault
  ]

-- | Make sure add is idempotent
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          fn = PeerMap.add key em
          puniq = PeerMap.accumArray const pmap

-- | Make sure remove is idempotent
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          fn = PeerMap.remove key
          puniq = PeerMap.accumArray const pmap

-- | Make sure a missing item returns 0
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | Make sure an added item is found
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = (pmap::PeerMap.PeerMap,
                    key::PeerMap.Key, em::PeerMap.Elem)
          puniq = PeerMap.accumArray const pmap

-- | Manual check that maxElem returns the maximum indeed, or 0 for null
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq then 0
                             else (maximum . snd . unzip) puniq
    where _types = pmap::PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]

-- Container tests

prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
       fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata::[Int],
                    i1::Int, i2::Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

prop_Container_nameOf node =
  let nl = makeSmallCluster node 1
      fnode = head (Container.elems nl)
  in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name
prop_Container_findByName node othername =
  forAll (choose (1, 20)) $ \ cnt ->
  forAll (choose (0, cnt - 1)) $ \ fidx ->
  forAll (vector cnt) $ \ names ->
  (length . nub) (map fst names ++ map snd names) ==
  length names * 2 &&
  not (othername `elem` (map fst names ++ map snd names)) ==>
  let nl = makeSmallCluster node cnt
      nodes = Container.elems nl
      nodes' = map (\((name, alias), nn) -> (Node.idx nn,
                                             nn { Node.name = name,
                                                  Node.alias = alias }))
               $ zip names nodes
      nl' = Container.fromList nodes'
      target = snd (nodes' !! fidx)
  in Container.findByName nl' (Node.name target) == Just target &&
     Container.findByName nl' (Node.alias target) == Just target &&
     Container.findByName nl' othername == Nothing

testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]

-- Simple instance tests; we only have setters/getters

prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst::Instance.Instance, idx::Types.Idx)

prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setName inst name

prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst::Instance.Instance, name::String)
          newinst = Instance.setAlias inst name

prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx)

prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst::Instance.Instance, sdx::Types.Ndx)

prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
          si = Instance.setBoth inst pdx sdx

prop_Instance_runStatus_True =
    forAll (arbitrary `suchThat`
            ((`elem` Instance.runningStates) . Instance.runSt))
    Instance.running

prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in
      run_tx `notElem` Instance.runningStates ==> not run_st

prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
        case Instance.shrinkByType inst Types.FailMem of
          Types.Ok inst' ->
              Instance.mem inst' == Instance.mem inst - Types.unitMem
          _ -> False

prop_Instance_shrinkMF inst =
    forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
    let inst' = inst { Instance.mem = mem}
    in Types.isBad $ Instance.shrinkByType inst' Types.FailMem

prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
        case Instance.shrinkByType inst Types.FailCPU of
          Types.Ok inst' ->
              Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
          _ -> False

prop_Instance_shrinkCF inst =
    forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
    let inst' = inst { Instance.vcpus = vcpus }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU

prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
        case Instance.shrinkByType inst Types.FailDisk of
          Types.Ok inst' ->
              Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
          _ -> False

prop_Instance_shrinkDF inst =
    forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
    let inst' = inst { Instance.dsk = dsk }
    in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk

prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]

-- Instance text loader tests

prop_Text_Load_Instance name mem dsk vcpus status
                        (NonEmpty pnode) snode
                        (NonNegative pdx) (NonNegative sdx) autobal =
    pnode /= snode && pdx /= sdx ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, sdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        sbal = if autobal then "Y" else "N"
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, snode, tags]:: Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status,
                sbal, pnode, pnode, tags]:: Maybe (String, Instance.Instance)
        _types = ( name::String, mem::Int, dsk::Int
                 , vcpus::Int, status::String
                 , snode::String
                 , autobal::Bool)
    in
      case inst of
        Nothing -> False
        Just (_, i) ->
            Instance.name i == name &&
            Instance.vcpus i == vcpus &&
            Instance.mem i == mem &&
            Instance.pNode i == pdx &&
            Instance.sNode i == (if null snode
                                 then Node.noSecondary
                                 else sdx) &&
            Instance.auto_balance i == autobal &&
            isNothing fail1
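
-- For reference, the field list passed to 'Text.loadInst' above mirrors the
-- text backend's per-instance line, which has exactly nine fields (hence
-- the /= 9 failure test below):
--
-- > name|mem|dsk|vcpus|status|auto_balance|pnode|snode|tags
--
-- (shown here with the '|' separator used by the node serialization test
-- further down; 'Text.loadInst' itself receives the already-split list).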

prop_Text_Load_InstanceFail ktn fields =
    length fields /= 9 ==>
    case Text.loadInst nl fields of
      Types.Ok _ -> False
      Types.Bad msg -> "Invalid/incomplete instance data: '" `isPrefixOf` msg
    where nl = Data.Map.fromList ktn

prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                    then "?"
                    else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
           [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields

prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc.
         Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }
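
-- In other words, serializing a node and feeding the resulting
-- '|'-separated line back through 'Text.loadNode' must reproduce the same
-- node. An illustrative (made-up) serialized line, following the field
-- order checked in 'prop_Text_Load_Node' above:
--
-- > node1.example.com|16384|1024|14336|409600|397312|8|N|<group-uuid>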

testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]

-- Node tests

prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node::Node.Node, name::String)
          newnode = Node.setAlias node name

prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailMem -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
                               not (Node.failN1 node)
                               ==>
                               case Node.addPri node inst'' of
                                 Types.OpFail Types.FailDisk -> True
                                 _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

prop_Node_addPriFC node inst (Positive extra) =
    not (Node.failN1 node) ==>
        case Node.addPri node inst'' of
          Types.OpFail Types.FailCPU -> True
          _ -> False
    where _types = (node::Node.Node, inst::Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }

-- | Check that an instance add with too high memory or disk will be rejected
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
        where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)

-- | Checks for memory reservation changes
prop_Node_rMem node inst =
    -- ab = auto_balance, nb = non-auto_balance
    -- we use -1 as the primary node of the instance
    let inst' = inst { Instance.pNode = -1, Instance.auto_balance = True }
        inst_ab = setInstanceSmallerThanNode node inst'
        inst_nb = inst_ab { Instance.auto_balance = False }
        -- now we have the two instances, identical except the
        -- auto_balance attribute
        orig_rmem = Node.rMem node
        inst_idx = Instance.idx inst_ab
        node_add_ab = Node.addSec node inst_ab (-1)
        node_add_nb = Node.addSec node inst_nb (-1)
        node_del_ab = liftM (flip Node.removeSec inst_ab) node_add_ab
        node_del_nb = liftM (flip Node.removeSec inst_nb) node_add_nb
    in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
         (Types.OpGood a_ab, Types.OpGood a_nb,
          Types.OpGood d_ab, Types.OpGood d_nb) ->
             Node.rMem a_ab >  orig_rmem &&
             Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
             Node.rMem a_nb == orig_rmem &&
             Node.rMem d_ab == orig_rmem &&
             Node.rMem d_nb == orig_rmem &&
             -- this is not related to rMem, but as good a place to
             -- test as any
             inst_idx `elem` Node.sList a_ab &&
             not (inst_idx `elem` Node.sList d_ab)
         _ -> False

newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Check mdsk setting
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node::Node.Node, mx::SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

-- Check tag maps
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

prop_Node_showField node =
  forAll (elements Node.defaultFields) $ \ field ->
  fst (Node.showHeader field) /= Types.unknownField &&
  Node.showField node field /= Types.unknownField


prop_Node_computeGroups nodes =
  let ng = Node.computeGroups nodes
      onlyuuid = map fst ng
  in length nodes == sum (map (length . snd) ng) &&
     all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
     length (nub onlyuuid) == length onlyuuid &&
     (null nodes || not (null ng))

testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_rMem
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]


-- Cluster tests

-- | Check that the cluster score is close to zero for a homogeneous cluster
prop_Score_Zero node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-12

-- | Check that cluster stats are sane
prop_CStats_sane node =
    forAll (choose (1, 1024)) $ \count ->
    (not (Node.offline node) && not (Node.failN1 node) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

-- | Check that one instance is allocated correctly, without
-- rebalances needed
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && Node.availDisk node > 0
            && Node.availMem node > 0
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, cv):[] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True True False)
               _ -> False
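
-- The shape matched above: a successful 'Cluster.tryAlloc' for a single
-- instance should yield exactly one solution tuple, whose first, second and
-- last elements are the updated node list, the placed instance and the new
-- cluster score (the third element is unused here); the final 'canBalance'
-- check then asserts that no further balancing move is found on the
-- resulting cluster.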

-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
        Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
                                      IntMap.size il' == length ixes &&
                                      length ixes == length cstats

-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
            && not (Node.failN1 node)
            && isNodeBig node 4
            ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
       Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [] -> False
               (xnl, xi, _, _):[] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance
prop_ClusterAllocBalance =
    forAll (genNode (Just 5) (Just 128)) $ \node ->
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node) && not (Node.failN1 node) ==>
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
        Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
                   let ynl = Container.add (Node.idx hnode) hnode xnl
                       cv = Cluster.compCV ynl
                       tbl = Cluster.Table ynl il' cv []
                   in canBalance tbl True True False

-- | Checks consistency
prop_ClusterCheckConsistency node inst =
  let nl = makeSmallCluster node 3
      [node1, node2, node3] = Container.elems nl
      node3' = node3 { Node.group = 1 }
      nl' = Container.add (Node.idx node3') node3' nl
      inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
      inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
      inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
      ccheck = Cluster.findSplitInstances nl' . Container.fromList
  in null (ccheck [(0, inst1)]) &&
     null (ccheck [(0, inst2)]) &&
     (not . null $ ccheck [(0, inst3)])

-- For now, we only test that we don't lose instances during the split
prop_ClusterSplitCluster node inst =
  forAll (choose (0, 100)) $ \icnt ->
  let nl = makeSmallCluster node 2
      (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                   (nl, Container.empty) [1..icnt]
      gni = Cluster.splitCluster nl' il'
  in sum (map (Container.size . snd . snd) gni) == icnt &&
     all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
                                 (Container.elems nl'')) gni

testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

-- | Check that opcode serialization is idempotent

prop_OpCodes_serialization op =
  case J.readJSON (J.showJSON op) of
    J.Error _ -> False
    J.Ok op' -> op == op'
  where _types = op::OpCodes.OpCode

testOpCodes =
  [ run prop_OpCodes_serialization
  ]

-- | Check that (queued) job\/opcode status serialization is idempotent
prop_OpStatus_serialization os =
  case J.readJSON (J.showJSON os) of
    J.Error _ -> False
    J.Ok os' -> os == os'
  where _types = os::Jobs.OpStatus

prop_JobStatus_serialization js =
  case J.readJSON (J.showJSON js) of
    J.Error _ -> False
    J.Ok js' -> js == js'
  where _types = js::Jobs.JobStatus

testJobs =
  [ run prop_OpStatus_serialization
  , run prop_JobStatus_serialization
  ]

-- | Loader tests

prop_Loader_lookupNode ktn inst node =
  Loader.lookupNode nl inst node == Data.Map.lookup node nl
  where nl = Data.Map.fromList ktn

prop_Loader_lookupInstance kti inst =
  Loader.lookupInstance il inst == Data.Map.lookup inst il
  where il = Data.Map.fromList kti

prop_Loader_assignIndices nodes =
  Data.Map.size nassoc == length nodes &&
  Container.size kt == length nodes &&
  (if not (null nodes)
   then maximum (IntMap.keys kt) == length nodes - 1
   else True)
  where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
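
-- For example (hypothetical values): assigning indices to three nodes named
-- "a", "b" and "c" should produce a name-to-index map of size 3 and a
-- container keyed 0..2; the property above checks exactly these size and
-- maximum-key invariants without fixing which name gets which index.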


-- | Checks that the number of primary instances recorded on the nodes
-- is zero
prop_Loader_mergeData ns =
  let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
  in case Loader.mergeData [] [] [] []
         (Loader.emptyCluster {Loader.cdNodes = na}) of
    Types.Bad _ -> False
    Types.Ok (Loader.ClusterData _ nl il _) ->
      let nodes = Container.elems nl
          instances = Container.elems il
      in (sum . map (length . Node.pList)) nodes == 0 &&
         null instances

testLoader =
  [ run prop_Loader_lookupNode
  , run prop_Loader_lookupInstance
  , run prop_Loader_assignIndices
  , run prop_Loader_mergeData
  ]