## root / htools / Ganeti / HTools / QC.hs @ 8d66f58a

History | View | Annotate | Download (30.1 kB)

1 |
{-| Unittests for ganeti-htools |
---|---|

2 | |

3 |
-} |

4 | |

5 |
{- |

6 | |

7 |
Copyright (C) 2009, 2010, 2011 Google Inc. |

8 | |

9 |
This program is free software; you can redistribute it and/or modify |

10 |
it under the terms of the GNU General Public License as published by |

11 |
the Free Software Foundation; either version 2 of the License, or |

12 |
(at your option) any later version. |

13 | |

14 |
This program is distributed in the hope that it will be useful, but |

15 |
WITHOUT ANY WARRANTY; without even the implied warranty of |

16 |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |

17 |
General Public License for more details. |

18 | |

19 |
You should have received a copy of the GNU General Public License |

20 |
along with this program; if not, write to the Free Software |

21 |
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |

22 |
02110-1301, USA. |

23 | |

24 |
-} |

25 | |

26 |
module Ganeti.HTools.QC |

27 |
( testUtils |

28 |
, testPeerMap |

29 |
, testContainer |

30 |
, testInstance |

31 |
, testNode |

32 |
, testText |

33 |
, testOpCodes |

34 |
, testJobs |

35 |
, testCluster |

36 |
, testLoader |

37 |
) where |

38 | |

39 |
import Test.QuickCheck |

40 |
import Test.QuickCheck.Batch |

41 |
import Data.List (findIndex, intercalate, nub) |

42 |
import Data.Maybe |

43 |
import Control.Monad |

44 |
import qualified Text.JSON as J |

45 |
import qualified Data.Map |

46 |
import qualified Data.IntMap as IntMap |

47 |
import qualified Ganeti.OpCodes as OpCodes |

48 |
import qualified Ganeti.Jobs as Jobs |

49 |
import qualified Ganeti.Luxi |

50 |
import qualified Ganeti.HTools.CLI as CLI |

51 |
import qualified Ganeti.HTools.Cluster as Cluster |

52 |
import qualified Ganeti.HTools.Container as Container |

53 |
import qualified Ganeti.HTools.ExtLoader |

54 |
import qualified Ganeti.HTools.IAlloc as IAlloc |

55 |
import qualified Ganeti.HTools.Instance as Instance |

56 |
import qualified Ganeti.HTools.Loader as Loader |

57 |
import qualified Ganeti.HTools.Luxi |

58 |
import qualified Ganeti.HTools.Node as Node |

59 |
import qualified Ganeti.HTools.Group as Group |

60 |
import qualified Ganeti.HTools.PeerMap as PeerMap |

61 |
import qualified Ganeti.HTools.Rapi |

62 |
import qualified Ganeti.HTools.Simu |

63 |
import qualified Ganeti.HTools.Text as Text |

64 |
import qualified Ganeti.HTools.Types as Types |

65 |
import qualified Ganeti.HTools.Utils as Utils |

66 |
import qualified Ganeti.HTools.Version |

67 | |

68 |
-- * Constants |

69 | |

70 |
-- | Maximum memory used by generators (1TiB; an arbitrary but
-- comfortably large bound).
maxMem :: Int
maxMem = 1024 * 1024

-- | Maximum disk used by generators (8TiB; arbitrary bound).
maxDsk :: Int
maxDsk = 8 * 1024 * 1024

-- | Maximum CPU count used by generators (arbitrary bound).
maxCpu :: Int
maxCpu = 1024

81 | |

82 |
-- | The default node group used by all tests, with index 0.
defGroup :: Group.Group
defGroup = Group.setIdx (Group.create "default" Utils.defaultGroupID
                         Types.AllocPreferred) 0

-- | A group container holding only 'defGroup'.
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]

-- | Uuid-to-index association for 'defGroup'.
defGroupAssoc :: Data.Map.Map String Types.Gdx
defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)

92 | |

93 |
-- * Helper functions |

94 | |

95 |
-- | Simple predicate for whether an OpResult is a failure.
isFailure :: Types.OpResult a -> Bool
isFailure res = case res of
                  Types.OpFail _ -> True
                  _ -> False

99 | |

100 |
-- | Shrink an instance so that it is guaranteed to fit on the given
-- node: each resource is set to half of what the node has available.
setInstanceSmallerThanNode :: Node.Node -> Instance.Instance
                           -> Instance.Instance
setInstanceSmallerThanNode node inst =
    inst { Instance.mem = Node.availMem node `div` 2
         , Instance.dsk = Node.availDisk node `div` 2
         , Instance.vcpus = Node.availCpu node `div` 2
         }

106 | |

107 |
-- | Create an unnamed, running instance from the given
-- memory\/disk\/vcpus spec, with no nodes assigned yet.
createInstance :: Int -> Int -> Int -> Instance.Instance
createInstance mem dsk vcpus =
    Instance.create "inst-unnamed" mem dsk vcpus "running" [] (-1) (-1)

110 | |

111 |
-- | Build a small homogeneous cluster by replicating one node spec
-- @count@ times.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
    let node' = Node.buildPeers node Container.empty
        pairs = map (\n -> (Node.name n, n)) (replicate count node')
    in snd (Loader.assignIndices pairs)

118 | |

119 |
-- | Whether a node has at least @size@ allocation units free in each
-- of disk, memory and CPU.
isNodeBig :: Node.Node -> Int -> Bool
isNodeBig node size =
    Node.availDisk node > size * Types.unitDsk &&
    Node.availMem node > size * Types.unitMem &&
    Node.availCpu node > size * Types.unitCpu

124 | |

125 |
-- | Whether the balancer can still find an improving move on the
-- given table (disk-move and evacuation-mode flags as given).
canBalance :: Cluster.Table -> Bool -> Bool -> Bool
canBalance tbl dm evac = isJust (Cluster.tryBalance tbl dm evac 0 0)

127 | |

128 |
-- | Unconditionally place a fresh instance on the given primary and
-- secondary nodes; this is not a real allocation, so no resource
-- checks are performed.
assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                  Types.Idx -> Types.Idx ->
                  (Node.List, Instance.List)
assignInstance nl il inst pdx sdx =
    let newidx = if Container.null il
                 then 0
                 else fst (Container.findMax il) + 1
        inst' = inst { Instance.idx = newidx
                     , Instance.pNode = pdx
                     , Instance.sNode = sdx }
        pri = Node.setPri (Container.find pdx nl) inst'
        sec = Node.setSec (Container.find sdx nl) inst'
    in ( Container.addTwo pdx pri sdx sec nl
       , Container.add newidx inst' il )

146 | |

147 |
-- * Arbitrary instances |

148 | |

149 |
-- Copied from the QuickCheck introduction: keep generated characters
-- in the printable range.
instance Arbitrary Char where
    arbitrary = choose ('\32', '\128')

152 | |

153 |
-- | A single character that is valid inside a DNS label.
newtype DNSChar = DNSChar { dnsGetChar :: Char }

instance Arbitrary DNSChar where
    arbitrary = do
      c <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
      return (DNSChar c)

158 | |

159 |
-- | Generate a DNS label of 1..64 characters.
getName :: Gen String
getName = do
  len <- choose (1, 64)
  chars <- vector len :: Gen [DNSChar]
  return $ map dnsGetChar chars

164 | |

165 | |

166 |
-- | Generate a fully-qualified name: one label followed by 1..4
-- further dot-separated label components.
getFQDN :: Gen String
getFQDN = do
  first <- getName
  ncomps <- choose (1, 4)
  rest <- vector ncomps :: Gen [[DNSChar]]
  return . intercalate "." $ first : map (map dnsGetChar) rest

173 | |

174 |
-- | Generate a random instance; the run state mixes valid and
-- invalid values so that status handling is exercised too.
instance Arbitrary Instance.Instance where
    arbitrary = do
      name <- getFQDN
      mem <- choose (0, maxMem)
      dsk <- choose (0, maxDsk)
      run_st <- elements [ "ERROR_up", "ERROR_down", "ADMIN_down"
                         , "ERROR_nodedown", "ERROR_nodeoffline"
                         , "running"
                         , "no_such_status1", "no_such_status2" ]
      pn <- arbitrary
      sn <- arbitrary
      vcpus <- choose (0, maxCpu)
      return $ Instance.create name mem dsk vcpus run_st [] pn sn

188 | |

189 |
-- | Generate a random node with internally consistent figures:
-- free <= total for memory and disk, node-memory <= total - free.
instance Arbitrary Node.Node where
    arbitrary = do
      name <- getFQDN
      mem_t <- choose (0, maxMem)
      mem_f <- choose (0, mem_t)
      mem_n <- choose (0, mem_t - mem_f)
      dsk_t <- choose (0, maxDsk)
      dsk_f <- choose (0, dsk_t)
      cpu_t <- choose (0, maxCpu)
      offl <- arbitrary
      let node = Node.create name (fromIntegral mem_t) mem_n mem_f
                 (fromIntegral dsk_t) dsk_f (fromIntegral cpu_t) offl
                 0
      return $ Node.buildPeers node Container.empty

205 | |

206 |
-- | Replace-disks mode is a plain enumeration of the four variants.
instance Arbitrary OpCodes.ReplaceDisksMode where
    arbitrary = elements [ OpCodes.ReplaceOnPrimary
                         , OpCodes.ReplaceOnSecondary
                         , OpCodes.ReplaceNewSecondary
                         , OpCodes.ReplaceAuto
                         ]

213 | |

214 |
-- | Generate one of the four supported opcodes, with arbitrary
-- payload fields.
instance Arbitrary OpCodes.OpCode where
    arbitrary = do
      op_id <- elements [ "OP_TEST_DELAY"
                        , "OP_INSTANCE_REPLACE_DISKS"
                        , "OP_INSTANCE_FAILOVER"
                        , "OP_INSTANCE_MIGRATE"
                        ]
      case op_id of
        "OP_TEST_DELAY" ->
            liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
        "OP_INSTANCE_REPLACE_DISKS" ->
            liftM5 OpCodes.OpReplaceDisks arbitrary arbitrary
                   arbitrary arbitrary arbitrary
        "OP_INSTANCE_FAILOVER" ->
            liftM2 OpCodes.OpFailoverInstance arbitrary arbitrary
        "OP_INSTANCE_MIGRATE" ->
            liftM4 OpCodes.OpMigrateInstance arbitrary arbitrary
                   arbitrary arbitrary
        -- unreachable given the elements list above
        _ -> fail "Wrong opcode"

233 | |

234 |
-- | Job and opcode statuses are bounded enumerations, so any value
-- in range is fine.
instance Arbitrary Jobs.OpStatus where
    arbitrary = elements [minBound..maxBound]

instance Arbitrary Jobs.JobStatus where
    arbitrary = elements [minBound..maxBound]

239 | |

240 |
-- * Actual tests |

241 | |

242 |
-- | join+split is idempotent as long as the input is not the single
-- empty string and no element contains a comma.
prop_Utils_commaJoinSplit lst = lst /= [""] &&
                                all (notElem ',') lst ==>
                                Utils.sepSplit ',' (Utils.commaJoin lst) == lst

-- | split+join is idempotent on any input.
prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s

-- | The Utils test group.
testUtils =
    [ run prop_Utils_commaJoinSplit
    , run prop_Utils_commaSplitJoin
    ]

254 | |

255 |
-- | 'PeerMap.add' is idempotent.
prop_PeerMap_addIdempotent pmap key em =
    fn puniq == fn (fn puniq)
    where _types = ( pmap :: PeerMap.PeerMap
                   , key :: PeerMap.Key
                   , em :: PeerMap.Elem )
          puniq = PeerMap.accumArray const pmap
          fn = PeerMap.add key em

-- | 'PeerMap.remove' is idempotent.
prop_PeerMap_removeIdempotent pmap key =
    fn puniq == fn (fn puniq)
    where _types = (pmap :: PeerMap.PeerMap, key :: PeerMap.Key)
          puniq = PeerMap.accumArray const pmap
          fn = PeerMap.remove key

-- | Looking up a removed (hence missing) key yields 0.
prop_PeerMap_findMissing pmap key =
    PeerMap.find key (PeerMap.remove key puniq) == 0
    where _types = (pmap :: PeerMap.PeerMap, key :: PeerMap.Key)
          puniq = PeerMap.accumArray const pmap

-- | A freshly added element can be found again.
prop_PeerMap_addFind pmap key em =
    PeerMap.find key (PeerMap.add key em puniq) == em
    where _types = ( pmap :: PeerMap.PeerMap
                   , key :: PeerMap.Key
                   , em :: PeerMap.Elem )
          puniq = PeerMap.accumArray const pmap

-- | 'PeerMap.maxElem' returns the real maximum, or 0 for an empty
-- map.
prop_PeerMap_maxElem pmap =
    PeerMap.maxElem puniq == if null puniq
                             then 0
                             else (maximum . map snd) puniq
    where _types = pmap :: PeerMap.PeerMap
          puniq = PeerMap.accumArray const pmap

-- | The PeerMap test group.
testPeerMap =
    [ run prop_PeerMap_addIdempotent
    , run prop_PeerMap_removeIdempotent
    , run prop_PeerMap_maxElem
    , run prop_PeerMap_addFind
    , run prop_PeerMap_findMissing
    ]

297 | |

298 |
-- Container tests

-- | 'Container.addTwo' is insensitive to the order of its two
-- additions and idempotent overall.
prop_Container_addTwo cdata i1 i2 =
    fn i1 i2 cont == fn i2 i1 cont &&
    fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
    where _types = (cdata :: [Int], i1 :: Int, i2 :: Int)
          cont = foldl (\c x -> Container.add x x c) Container.empty cdata
          fn x1 x2 = Container.addTwo x1 x1 x2 x2

-- | 'Container.nameOf' returns the name stored in the element.
prop_Container_nameOf node =
    let nl = makeSmallCluster node 1
        fnode = head (Container.elems nl)
    in Container.nameOf nl (Node.idx fnode) == Node.name fnode

-- | In a cluster where all names and aliases are unique, any node
-- can be found by either its name or its alias, and a name belonging
-- to no node is not found.
prop_Container_findByName node othername =
    forAll (choose (1, 20)) $ \cnt ->
    forAll (choose (0, cnt - 1)) $ \fidx ->
    forAll (vector cnt) $ \names ->
    (length . nub) (map fst names ++ map snd names) ==
    length names * 2 &&
    othername `notElem` (map fst names ++ map snd names) ==>
    let nl = makeSmallCluster node cnt
        nodes = Container.elems nl
        nodes' = map (\((name, alias), nn) ->
                          (Node.idx nn,
                           nn { Node.name = name, Node.alias = alias }))
                 $ zip names nodes
        nl' = Container.fromList nodes'
        target = snd (nodes' !! fidx)
    in Container.findByName nl' (Node.name target) == Just target &&
       Container.findByName nl' (Node.alias target) == Just target &&
       isNothing (Container.findByName nl' othername)

-- | The Container test group.
testContainer =
    [ run prop_Container_addTwo
    , run prop_Container_nameOf
    , run prop_Container_findByName
    ]

340 | |

341 |
-- Simple instance tests: mostly setters and getters

-- | Creation initializes the alias from the name.
prop_Instance_creat inst =
    Instance.name inst == Instance.alias inst

-- | setIdx stores the given index.
prop_Instance_setIdx inst idx =
    Instance.idx (Instance.setIdx inst idx) == idx
    where _types = (inst :: Instance.Instance, idx :: Types.Idx)

-- | setName updates both the name and the alias.
prop_Instance_setName inst name =
    Instance.name newinst == name &&
    Instance.alias newinst == name
    where _types = (inst :: Instance.Instance, name :: String)
          newinst = Instance.setName inst name

-- | setAlias updates only the alias, leaving the name untouched.
prop_Instance_setAlias inst name =
    Instance.name newinst == Instance.name inst &&
    Instance.alias newinst == name
    where _types = (inst :: Instance.Instance, name :: String)
          newinst = Instance.setAlias inst name

-- | setPri stores the primary node index.
prop_Instance_setPri inst pdx =
    Instance.pNode (Instance.setPri inst pdx) == pdx
    where _types = (inst :: Instance.Instance, pdx :: Types.Ndx)

-- | setSec stores the secondary node index.
prop_Instance_setSec inst sdx =
    Instance.sNode (Instance.setSec inst sdx) == sdx
    where _types = (inst :: Instance.Instance, sdx :: Types.Ndx)

-- | setBoth stores both node indices at once.
prop_Instance_setBoth inst pdx sdx =
    Instance.pNode si == pdx && Instance.sNode si == sdx
    where _types = (inst :: Instance.Instance,
                    pdx :: Types.Ndx, sdx :: Types.Ndx)
          si = Instance.setBoth inst pdx sdx

-- | A run state listed in 'Instance.runningStates' is reported as
-- running.
prop_Instance_runStatus_True inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in run_tx `elem` Instance.runningStates ==> run_st

-- | Any other run state is reported as not running.
prop_Instance_runStatus_False inst =
    let run_st = Instance.running inst
        run_tx = Instance.runSt inst
    in run_tx `notElem` Instance.runningStates ==> not run_st

-- | Shrinking memory takes off exactly one unit while at least two
-- units remain...
prop_Instance_shrinkMG inst =
    Instance.mem inst >= 2 * Types.unitMem ==>
    case Instance.shrinkByType inst Types.FailMem of
      Types.Ok inst' ->
          Instance.mem inst' == Instance.mem inst - Types.unitMem
      _ -> False

-- | ... and fails below that threshold.
prop_Instance_shrinkMF inst =
    Instance.mem inst < 2 * Types.unitMem ==>
    Types.isBad $ Instance.shrinkByType inst Types.FailMem

-- | CPU shrinking: one unit off while at least two remain...
prop_Instance_shrinkCG inst =
    Instance.vcpus inst >= 2 * Types.unitCpu ==>
    case Instance.shrinkByType inst Types.FailCPU of
      Types.Ok inst' ->
          Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
      _ -> False

-- | ... failure below.
prop_Instance_shrinkCF inst =
    Instance.vcpus inst < 2 * Types.unitCpu ==>
    Types.isBad $ Instance.shrinkByType inst Types.FailCPU

-- | Disk shrinking: one unit off while at least two remain...
prop_Instance_shrinkDG inst =
    Instance.dsk inst >= 2 * Types.unitDsk ==>
    case Instance.shrinkByType inst Types.FailDisk of
      Types.Ok inst' ->
          Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
      _ -> False

-- | ... failure below.
prop_Instance_shrinkDF inst =
    Instance.dsk inst < 2 * Types.unitDsk ==>
    Types.isBad $ Instance.shrinkByType inst Types.FailDisk

-- | setMovable stores the movability flag.
prop_Instance_setMovable inst m =
    Instance.movable inst' == m
    where inst' = Instance.setMovable inst m

-- | The Instance test group.
testInstance =
    [ run prop_Instance_creat
    , run prop_Instance_setIdx
    , run prop_Instance_setName
    , run prop_Instance_setAlias
    , run prop_Instance_setPri
    , run prop_Instance_setSec
    , run prop_Instance_setBoth
    , run prop_Instance_runStatus_True
    , run prop_Instance_runStatus_False
    , run prop_Instance_shrinkMG
    , run prop_Instance_shrinkMF
    , run prop_Instance_shrinkCG
    , run prop_Instance_shrinkCF
    , run prop_Instance_shrinkDG
    , run prop_Instance_shrinkDF
    , run prop_Instance_setMovable
    ]

442 | |

443 |
-- Instance text loader tests

-- | A well-formed instance line round-trips through the text loader,
-- and a line whose primary and secondary node coincide is rejected.
prop_Text_Load_Instance name mem dsk vcpus status pnode snode pdx sdx =
    not (null pnode) && pdx >= 0 && sdx >= 0 ==>
    let vcpus_s = show vcpus
        dsk_s = show dsk
        mem_s = show mem
        -- make sure the secondary index differs from the primary one
        rsdx = if pdx == sdx
               then sdx + 1
               else sdx
        ndx = if null snode
              then [(pnode, pdx)]
              else [(pnode, pdx), (snode, rsdx)]
        nl = Data.Map.fromList ndx
        tags = ""
        inst = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, snode, tags]::
               Maybe (String, Instance.Instance)
        fail1 = Text.loadInst nl
               [name, mem_s, dsk_s, vcpus_s, status, pnode, pnode, tags]::
               Maybe (String, Instance.Instance)
        _types = ( name :: String, mem :: Int, dsk :: Int
                 , vcpus :: Int, status :: String
                 , pnode :: String, snode :: String
                 , pdx :: Types.Ndx, sdx :: Types.Ndx )
    in case inst of
         Nothing -> False
         Just (_, i) ->
             Instance.name i == name &&
             Instance.vcpus i == vcpus &&
             Instance.mem i == mem &&
             Instance.pNode i == pdx &&
             Instance.sNode i == (if null snode
                                  then Node.noSecondary
                                  else rsdx) &&
             isNothing fail1

480 | |

481 |
-- | Instance lines of the wrong arity (anything but 8 fields) must
-- be rejected.
prop_Text_Load_InstanceFail ktn fields =
    length fields /= 8 ==> isNothing (Text.loadInst nl fields)
    where nl = Data.Map.fromList ktn

484 | |

485 |
-- | Node loading: negative values are rendered as the \"?\" marker;
-- the offline flag or any broken field forces the node offline,
-- otherwise every field must round-trip.
prop_Text_Load_Node name tm nm fm td fd tc fo =
    let conv v = if v < 0
                 then "?"
                 else show v
        tm_s = conv tm
        nm_s = conv nm
        fm_s = conv fm
        td_s = conv td
        fd_s = conv fd
        tc_s = conv tc
        fo_s = if fo
               then "Y"
               else "N"
        any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
        gid = Group.uuid defGroup
    in case Text.loadNode defGroupAssoc
            [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
         Nothing -> False
         Just (name', node) ->
             if fo || any_broken
             then Node.offline node
             else Node.name node == name' && name' == name &&
                  Node.alias node == name &&
                  Node.tMem node == fromIntegral tm &&
                  Node.nMem node == nm &&
                  Node.fMem node == fm &&
                  Node.tDsk node == fromIntegral td &&
                  Node.fDsk node == fd &&
                  Node.tCpu node == fromIntegral tc

514 | |

515 |
-- | Node lines of the wrong arity must be rejected.
-- NOTE(review): prop_Text_Load_Node feeds loadNode nine fields
-- successfully; confirm whether 9-field lines should be excluded
-- from this generator as well.
prop_Text_Load_NodeFail fields =
    length fields /= 8 ==> isNothing (Text.loadNode Data.Map.empty fields)

517 | |

518 |
-- | Serializing then re-loading a node is the identity, modulo the
-- failN1/offline flags which loadNode sets itself.
prop_Text_NodeLSIdempotent node =
    (Text.loadNode defGroupAssoc .
     Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
    Just (Node.name n, n)
    -- override failN1 to what loadNode returns by default
    where n = node { Node.failN1 = True, Node.offline = False }

524 | |

525 |
-- | The Text loader test group.
testText =
    [ run prop_Text_Load_Instance
    , run prop_Text_Load_InstanceFail
    , run prop_Text_Load_Node
    , run prop_Text_Load_NodeFail
    , run prop_Text_NodeLSIdempotent
    ]

532 | |

533 |
-- Node tests

-- | setAlias changes the alias and nothing else.
prop_Node_setAlias node name =
    Node.name newnode == Node.name node &&
    Node.alias newnode == name
    where _types = (node :: Node.Node, name :: String)
          newnode = Node.setAlias node name

-- | setOffline stores the offline flag.
prop_Node_setOffline node status =
    Node.offline newnode == status
    where newnode = Node.setOffline node status

-- | setXmem stores the external-memory figure.
prop_Node_setXmem node xm =
    Node.xMem newnode == xm
    where newnode = Node.setXmem node xm

-- | setMcpu stores the max-CPU ratio.
prop_Node_setMcpu node mc =
    Node.mCpu newnode == mc
    where newnode = Node.setMcpu node mc

552 | |

553 |
-- | A primary-addition that would exceed the node's free memory must
-- be rejected with FailMem.
prop_Node_addPriFM node inst =
    Instance.mem inst >= Node.fMem node && not (Node.failN1 node)
    ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailMem -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.mem = Instance.mem inst }

-- | A primary-addition that would exceed the node's free disk must
-- be rejected with FailDisk.
prop_Node_addPriFD node inst =
    Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node)
    ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailDisk -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.dsk = Instance.dsk inst }

-- | A primary-addition that would exceed the node's available CPUs
-- must be rejected with FailCPU.
prop_Node_addPriFC node inst =
    Instance.vcpus inst > Node.availCpu node && not (Node.failN1 node)
    ==>
    case Node.addPri node inst'' of
      Types.OpFail Types.FailCPU -> True
      _ -> False
    where _types = (node :: Node.Node, inst :: Instance.Instance)
          inst' = setInstanceSmallerThanNode node inst
          inst'' = inst' { Instance.vcpus = Instance.vcpus inst }

-- | A secondary-addition exceeding free memory (net of the reserved
-- amount) or free disk must fail.
prop_Node_addSec node inst pdx =
    (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
     Instance.dsk inst >= Node.fDsk node) &&
    not (Node.failN1 node)
    ==> isFailure (Node.addSec node inst pdx)
    where _types = (node :: Node.Node, inst :: Instance.Instance,
                    pdx :: Int)

591 | |

592 |
-- | A ratio restricted to the [0, 1] interval.
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
    arbitrary = do
      v <- choose (0, 1)
      return $ SmallRatio v

-- | Setting the minimum-disk ratio keeps all derived disk figures
-- consistent with the node's totals.
prop_Node_setMdsk node mx =
    Node.loDsk node' >= 0 &&
    fromIntegral (Node.loDsk node') <= Node.tDsk node &&
    Node.availDisk node' >= 0 &&
    Node.availDisk node' <= Node.fDsk node' &&
    fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
    Node.mDsk node' == mx'
    where _types = (node :: Node.Node, mx :: SmallRatio)
          node' = Node.setMdsk node mx'
          SmallRatio mx' = mx

609 | |

610 |
-- Tag map tests

-- | Adding then deleting the same tags is the identity on the map.
prop_Node_tagMaps_idempotent tags =
    Node.delTags (Node.addTags m tags) tags == m
    where m = Data.Map.empty

-- | Re-adding an already-present tag is rejected.
prop_Node_tagMaps_reject tags =
    not (null tags) ==>
    any (\t -> Node.rejectAddTags m [t]) tags
    where m = Node.addTags Data.Map.empty tags

-- | Every default field has a known header and a printable value.
prop_Node_showField node =
    forAll (elements Node.defaultFields) $ \field ->
    fst (Node.showHeader field) /= Types.unknownField &&
    Node.showField node field /= Types.unknownField

-- | Grouping nodes loses none of them, puts every node into its own
-- group's bucket, and lists each group uuid exactly once.
prop_Node_computeGroups nodes =
    let ng = Node.computeGroups nodes
        onlyuuid = map fst ng
    in length nodes == sum (map (length . snd) ng) &&
       all (\(guuid, ns) -> all ((== guuid) . Node.group) ns) ng &&
       length (nub onlyuuid) == length onlyuuid &&
       (null nodes || not (null ng))

633 | |

634 |
-- | The Node test group.
testNode =
    [ run prop_Node_setAlias
    , run prop_Node_setOffline
    , run prop_Node_setMcpu
    , run prop_Node_setXmem
    , run prop_Node_addPriFM
    , run prop_Node_addPriFD
    , run prop_Node_addPriFC
    , run prop_Node_addSec
    , run prop_Node_setMdsk
    , run prop_Node_tagMaps_idempotent
    , run prop_Node_tagMaps_reject
    , run prop_Node_showField
    , run prop_Node_computeGroups
    ]

649 | |

650 | |

651 |
-- Cluster tests |

652 | |

653 |
-- Cluster tests

-- | The score of a homogeneous, healthy cluster is (close to) zero.
prop_Score_Zero node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn :: [(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        score = Cluster.compCV nl
    -- we can't say == 0 here as the floating point errors accumulate;
    -- this should be much lower than the default score in CLI.hs
    in score <= 1e-15

-- | The aggregated cluster stats are sane: allocatable disk is
-- non-negative and bounded by the free disk.
prop_CStats_sane node count =
    (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
     (Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
    let fn = Node.buildPeers node Container.empty
        nlst = zip [1..] $ replicate count fn :: [(Types.Ndx, Node.Node)]
        nl = Container.fromList nlst
        cstats = Cluster.totalResources nl
    in Cluster.csAdsk cstats >= 0 &&
       Cluster.csAdsk cstats <= Cluster.csFdsk cstats

675 | |

676 |
-- | A single allocation on a sane cluster succeeds, and the
-- resulting cluster needs no further rebalancing.
prop_ClusterAlloc_sane node inst =
    forAll (choose (5, 20)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && Node.availDisk node > 0
    && Node.availMem node > 0
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
            Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [(xnl, xi, _, cv)] ->
                   let il' = Container.add (Instance.idx xi) xi il
                       tbl = Cluster.Table xnl il' cv []
                   in not (canBalance tbl True False)
               -- either no solution or more than one: both wrong
               _ -> False

-- | On a 2-5 node cluster, tiered allocation of an arbitrary spec
-- succeeds on one or two nodes, with consistent bookkeeping.
prop_ClusterCanTieredAlloc node inst =
    forAll (choose (2, 5)) $ \count ->
    forAll (choose (1, 2)) $ \rqnodes ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && isNodeBig node 4
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
    in case allocnodes >>= \allocnodes' ->
            Cluster.tieredAlloc nl il inst allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, _, il', ixes, cstats) ->
             not (null ixes) &&
             IntMap.size il' == length ixes &&
             length ixes == length cstats

719 | |

720 |
-- | On a 4-8 node cluster, an instance we just allocated can also be
-- evacuated from its secondary node.
prop_ClusterAllocEvac node inst =
    forAll (choose (4, 8)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && isNodeBig node 4
    ==>
    let nl = makeSmallCluster node count
        il = Container.empty
        inst' = setInstanceSmallerThanNode node inst
    in case Cluster.genAllocNodes defGroupList nl 2 True >>=
            Cluster.tryAlloc nl il inst' of
         Types.Bad _ -> False
         Types.Ok as ->
             case Cluster.asSolutions as of
               [(xnl, xi, _, _)] ->
                   let sdx = Instance.sNode xi
                       il' = Container.add (Instance.idx xi) xi il
                   in case Cluster.tryEvac xnl il' [Instance.idx xi] [sdx] of
                        Just _ -> True
                        _ -> False
               _ -> False

-- | Filling a cluster with instances and then adding back an empty
-- node results in a cluster with a valid rebalance available.
prop_ClusterAllocBalance node =
    forAll (choose (3, 5)) $ \count ->
    not (Node.offline node)
    && not (Node.failN1 node)
    && isNodeBig node 4
    && not (isNodeBig node 8)
    ==>
    let nl = makeSmallCluster node count
        (hnode, nl') = IntMap.deleteFindMax nl
        il = Container.empty
        allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
        i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
    in case allocnodes >>= \allocnodes' ->
            Cluster.iterateAlloc nl' il i_templ allocnodes' [] [] of
         Types.Bad _ -> False
         Types.Ok (_, xnl, il', _, _) ->
             let ynl = Container.add (Node.idx hnode) hnode xnl
                 cv = Cluster.compCV ynl
                 tbl = Cluster.Table ynl il' cv []
             in canBalance tbl True False

767 | |

768 |
-- | Split-instance detection: instances whose nodes share a group
-- are fine, an instance spanning two groups is flagged.
prop_ClusterCheckConsistency node inst =
    let nl = makeSmallCluster node 3
        [node1, node2, node3] = Container.elems nl
        node3' = node3 { Node.group = 1 }
        nl' = Container.add (Node.idx node3') node3' nl
        inst1 = Instance.setBoth inst (Node.idx node1) (Node.idx node2)
        inst2 = Instance.setBoth inst (Node.idx node1) Node.noSecondary
        inst3 = Instance.setBoth inst (Node.idx node1) (Node.idx node3)
        ccheck = Cluster.findSplitInstances nl' . Container.fromList
    in null (ccheck [(0, inst1)]) &&
       null (ccheck [(0, inst2)]) &&
       (not . null $ ccheck [(0, inst3)])

-- | Splitting a cluster by group loses no instances and keeps every
-- node inside its own group's partition.
prop_ClusterSplitCluster node inst =
    forAll (choose (0, 100)) $ \icnt ->
    let nl = makeSmallCluster node 2
        (nl', il') = foldl (\(ns, is) _ -> assignInstance ns is inst 0 1)
                     (nl, Container.empty) [1..icnt]
        gni = Cluster.splitCluster nl' il'
    in sum (map (Container.size . snd . snd) gni) == icnt &&
       all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
            (Container.elems nl'')) gni

792 | |

793 |
-- | The Cluster test group.
testCluster =
    [ run prop_Score_Zero
    , run prop_CStats_sane
    , run prop_ClusterAlloc_sane
    , run prop_ClusterCanTieredAlloc
    , run prop_ClusterAllocEvac
    , run prop_ClusterAllocBalance
    , run prop_ClusterCheckConsistency
    , run prop_ClusterSplitCluster
    ]

803 | |

804 |
-- | Opcode serialization round-trips through JSON.
prop_OpCodes_serialization op =
    case J.readJSON (J.showJSON op) of
      J.Error _ -> False
      J.Ok op' -> op == op'
    where _types = op :: OpCodes.OpCode

-- | The OpCodes test group.
testOpCodes =
    [ run prop_OpCodes_serialization
    ]

815 | |

816 |
-- | Queued job and opcode status serialization round-trips through
-- JSON.
prop_OpStatus_serialization os =
    case J.readJSON (J.showJSON os) of
      J.Error _ -> False
      J.Ok os' -> os == os'
    where _types = os :: Jobs.OpStatus

prop_JobStatus_serialization js =
    case J.readJSON (J.showJSON js) of
      J.Error _ -> False
      J.Ok js' -> js == js'
    where _types = js :: Jobs.JobStatus

-- | The Jobs test group.
testJobs =
    [ run prop_OpStatus_serialization
    , run prop_JobStatus_serialization
    ]

833 | |

834 |
-- | Loader tests

-- | Node lookup agrees with a plain map lookup.
prop_Loader_lookupNode ktn inst node =
    Loader.lookupNode nl inst node == Data.Map.lookup node nl
    where nl = Data.Map.fromList ktn

-- | Instance lookup agrees with a plain map lookup.
prop_Loader_lookupInstance kti inst =
    Loader.lookupInstance il inst == Data.Map.lookup inst il
    where il = Data.Map.fromList kti

-- | Index assignment covers every node exactly once, with indices
-- forming the contiguous range [0 .. n-1].
prop_Loader_assignIndices nodes =
    Data.Map.size nassoc == length nodes &&
    Container.size kt == length nodes &&
    (null nodes || maximum (IntMap.keys kt) == length nodes - 1)
    where (nassoc, kt) =
              Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)

-- | Merging a cluster without instances leaves no recorded primary
-- instances on any node.
prop_Loader_mergeData ns =
    let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
    in case Loader.mergeData [] [] []
            (Loader.emptyCluster {Loader.cdNodes = na}) of
         Types.Bad _ -> False
         Types.Ok (Loader.ClusterData _ nl il _) ->
             let nodes = Container.elems nl
                 instances = Container.elems il
             in (sum . map (length . Node.pList)) nodes == 0 &&
                null instances

-- | The Loader test group.
testLoader =
    [ run prop_Loader_lookupNode
    , run prop_Loader_lookupInstance
    , run prop_Loader_assignIndices
    , run prop_Loader_mergeData
    ]