## root / src / Ganeti / HTools / Node.hs @ 0cc3d742

History | View | Annotate | Download (28.7 kB)

1 |
{-| Module describing a node. |
---|---|

2 | |

3 |
All updates are functional (copy-based) and return a new node with |

4 |
updated value. |

5 |
-} |

6 | |

7 |
{- |

8 | |

9 |
Copyright (C) 2009, 2010, 2011, 2012, 2013 Google Inc. |

10 | |

11 |
This program is free software; you can redistribute it and/or modify |

12 |
it under the terms of the GNU General Public License as published by |

13 |
the Free Software Foundation; either version 2 of the License, or |

14 |
(at your option) any later version. |

15 | |

16 |
This program is distributed in the hope that it will be useful, but |

17 |
WITHOUT ANY WARRANTY; without even the implied warranty of |

18 |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |

19 |
General Public License for more details. |

20 | |

21 |
You should have received a copy of the GNU General Public License |

22 |
along with this program; if not, write to the Free Software |

23 |
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |

24 |
02110-1301, USA. |

25 | |

26 |
-} |

27 | |

28 |
module Ganeti.HTools.Node |

29 |
( Node(..) |

30 |
, List |

31 |
-- * Constructor |

32 |
, create |

33 |
-- ** Finalization after data loading |

34 |
, buildPeers |

35 |
, setIdx |

36 |
, setAlias |

37 |
, setOffline |

38 |
, setXmem |

39 |
, setFmem |

40 |
, setPri |

41 |
, setSec |

42 |
, setMaster |

43 |
, setNodeTags |

44 |
, setMdsk |

45 |
, setMcpu |

46 |
, setPolicy |

47 |
-- * Tag maps |

48 |
, addTags |

49 |
, delTags |

50 |
, rejectAddTags |

51 |
-- * Instance (re)location |

52 |
, removePri |

53 |
, removeSec |

54 |
, addPri |

55 |
, addPriEx |

56 |
, addSec |

57 |
, addSecEx |

58 |
-- * Stats |

59 |
, availDisk |

60 |
, availMem |

61 |
, availCpu |

62 |
, iMem |

63 |
, iDsk |

64 |
, conflictingPrimaries |

65 |
-- * Formatting |

66 |
, defaultFields |

67 |
, showHeader |

68 |
, showField |

69 |
, list |

70 |
-- * Misc stuff |

71 |
, AssocList |

72 |
, AllocElement |

73 |
, noSecondary |

74 |
, computeGroups |

75 |
, mkNodeGraph |

76 |
, mkRebootNodeGraph |

77 |
, haveExclStorage |

78 |
) where |

79 | |

80 |
import Control.Monad (liftM, liftM2) |

81 |
import Control.Applicative ((<$>), (<*>)) |

82 |
import qualified Data.Foldable as Foldable |

83 |
import Data.Function (on) |

84 |
import qualified Data.Graph as Graph |

85 |
import qualified Data.IntMap as IntMap |

86 |
import Data.List hiding (group) |

87 |
import qualified Data.Map as Map |

88 |
import Data.Ord (comparing) |

89 |
import Text.Printf (printf) |

90 | |

91 |
import qualified Ganeti.HTools.Container as Container |

92 |
import qualified Ganeti.HTools.Instance as Instance |

93 |
import qualified Ganeti.HTools.PeerMap as P |

94 | |

95 |
import Ganeti.BasicTypes |

96 |
import qualified Ganeti.HTools.Types as T |

97 | |

98 |
-- * Type declarations |

99 | |

-- | Map from an exclusion tag to the number of primary instances on
-- the node carrying that tag.
type TagMap = Map.Map String Int

102 | |

-- | The node type: static resources plus the book-keeping values that
-- are kept up to date as instances are placed on or removed from the
-- node. All updates are functional (copy-based).
data Node = Node
  { name         :: String    -- ^ The node name
  , alias        :: String    -- ^ The shortened name (for display purposes)
  , tMem         :: Double    -- ^ Total memory (MiB)
  , nMem         :: Int       -- ^ Node memory (MiB)
  , fMem         :: Int       -- ^ Free memory (MiB)
  , xMem         :: Int       -- ^ Unaccounted memory (MiB)
  , tDsk         :: Double    -- ^ Total disk space (MiB)
  , fDsk         :: Int       -- ^ Free disk space (MiB)
  , tCpu         :: Double    -- ^ Total CPU count
  , uCpu         :: Int       -- ^ Used VCPU count
  , tSpindles    :: Int       -- ^ Node spindles (spindle_count node
                              -- parameter, or actual spindles, see
                              -- note below)
  , fSpindles    :: Int       -- ^ Free spindles (see note below)
  , pList        :: [T.Idx]   -- ^ List of primary instance indices
  , sList        :: [T.Idx]   -- ^ List of secondary instance indices
  , idx          :: T.Ndx     -- ^ Internal index for book-keeping
  , peers        :: P.PeerMap -- ^ Pnode to instance mapping
  , failN1       :: Bool      -- ^ Whether the node has failed n1
  , rMem         :: Int       -- ^ Maximum memory needed for failover by
                              -- primaries of this node
  , pMem         :: Double    -- ^ Percent of free memory
  , pDsk         :: Double    -- ^ Percent of free disk
  , pRem         :: Double    -- ^ Percent of reserved memory
  , pCpu         :: Double    -- ^ Ratio of virtual to physical CPUs
  , mDsk         :: Double    -- ^ Minimum free disk ratio
  , loDsk        :: Int       -- ^ Autocomputed from mDsk low disk
                              -- threshold
  , hiCpu        :: Int       -- ^ Autocomputed from mCpu high cpu
                              -- threshold
  , hiSpindles   :: Double    -- ^ Limit auto-computed from policy
                              -- spindle_ratio and the node spindle
                              -- count (see note below)
  , instSpindles :: Double    -- ^ Spindles used by instances (see note
                              -- below)
  , offline      :: Bool      -- ^ Whether the node should not be used
                              -- for allocations and skipped from score
                              -- computations
  , isMaster     :: Bool      -- ^ Whether the node is the master node
  , nTags        :: [String]  -- ^ The node tags for this node
  , utilPool     :: T.DynUtil -- ^ Total utilisation capacity
  , utilLoad     :: T.DynUtil -- ^ Sum of instance utilisation
  , pTags        :: TagMap    -- ^ Primary instance exclusion tags and
                              -- their count
  , group        :: T.Gdx     -- ^ The node's group (index)
  , iPolicy      :: T.IPolicy -- ^ The instance policy (of the node's
                              -- group)
  , exclStorage  :: Bool      -- ^ Effective value of exclusive_storage
  } deriving (Show, Eq)
{- A note on how we handle spindles

With exclusive storage spindles is a resource, so we track the number of
spindles still available (fSpindles). This is the only reliable way, as some
spindles could be used outside of Ganeti. When exclusive storage is off,
spindles are a way to represent disk I/O pressure, and hence we track the
amount used by the instances. We compare it against 'hiSpindles', computed
from the instance policy, to avoid policy violations. In both cases we store
the total spindles in 'tSpindles'.
-}

159 | |

-- | Nodes are addressable elements: by name, alias or index.
instance T.Element Node where
  nameOf = name
  idxOf = idx
  setAlias = setAlias
  setIdx = setIdx
  allNames n = [name n, alias n]

-- | A simple name for the int, node association list.
type AssocList = [(T.Ndx, Node)]

-- | A simple name for a node map.
type List = Container.Container Node

-- | A simple name for an allocation element (here just for logistic
-- reasons): updated node list, placed instance, chosen nodes, score.
type AllocElement = (List, Instance.Instance, [Node], T.Score)

-- | Constant node index for a non-moveable instance (no secondary).
noSecondary :: T.Ndx
noSecondary = -1

180 | |

181 |
-- * Helper functions |

182 | |

-- | Add a tag to a tagmap, bumping its count.
addTag :: TagMap -> String -> TagMap
addTag tmap tag = Map.insertWith (+) tag 1 tmap

-- | Add multiple tags.
addTags :: TagMap -> [String] -> TagMap
addTags = foldl' addTag

-- | Adjust or delete a tag from a tagmap: decrement the count, and
-- drop the key entirely once it reaches zero.
delTag :: TagMap -> String -> TagMap
delTag tmap tag = Map.update dec tag tmap
  where dec v | v > 1     = Just (v - 1)
              | otherwise = Nothing

-- | Remove multiple tags.
delTags :: TagMap -> [String] -> TagMap
delTags = foldl' delTag

-- | Check if adding any of the given tags to the tagmap would create
-- a conflict (i.e. the tag is already present).
rejectAddTags :: TagMap -> [String] -> Bool
rejectAddTags tmap = any (`Map.member` tmap)

205 | |

-- | Check how many primary instances have conflicting tags. The
-- algorithm to compute this is to sum the count of all tags, then
-- subtract the size of the tag map (since each tag has at least one,
-- non-conflicting instance); this is equivalent to summing the
-- values in the tag map minus one.
conflictingPrimaries :: Node -> Int
conflictingPrimaries (Node { pTags = tags }) =
  Foldable.sum tags - Map.size tags

213 | |

-- | Conditionally increment a base value: add the delta only when the
-- flag is set, otherwise return the base untouched.
incIf :: (Num a) => Bool -> a -> a -> a
incIf True  base delta = base + delta
incIf False base _     = base

-- | Conditionally decrement a base value: subtract the delta only when
-- the flag is set, otherwise return the base untouched.
decIf :: (Num a) => Bool -> a -> a -> a
decIf True  base delta = base - delta
decIf False base _     = base

225 | |

-- | Is exclusive storage enabled on any node of the given list?
haveExclStorage :: List -> Bool
haveExclStorage = any exclStorage . Container.elems

230 | |

231 |
-- * Initialization functions |

232 | |

233 |
-- | Create a new node. |

234 |
-- |

235 |
-- The index and the peers maps are empty, and will be need to be |

236 |
-- update later via the 'setIdx' and 'buildPeers' functions. |

237 |
create :: String -> Double -> Int -> Int -> Double |

238 |
-> Int -> Double -> Bool -> Int -> Int -> T.Gdx -> Bool -> Node |

239 |
create name_init mem_t_init mem_n_init mem_f_init |

240 |
dsk_t_init dsk_f_init cpu_t_init offline_init spindles_t_init |

241 |
spindles_f_init group_init excl_stor = |

242 |
Node { name = name_init |

243 |
, alias = name_init |

244 |
, tMem = mem_t_init |

245 |
, nMem = mem_n_init |

246 |
, fMem = mem_f_init |

247 |
, tDsk = dsk_t_init |

248 |
, fDsk = dsk_f_init |

249 |
, tCpu = cpu_t_init |

250 |
, tSpindles = spindles_t_init |

251 |
, fSpindles = spindles_f_init |

252 |
, uCpu = 0 |

253 |
, pList = [] |

254 |
, sList = [] |

255 |
, failN1 = True |

256 |
, idx = -1 |

257 |
, peers = P.empty |

258 |
, rMem = 0 |

259 |
, pMem = fromIntegral mem_f_init / mem_t_init |

260 |
, pDsk = if excl_stor |

261 |
then computePDsk spindles_f_init $ fromIntegral spindles_t_init |

262 |
else computePDsk dsk_f_init dsk_t_init |

263 |
, pRem = 0 |

264 |
, pCpu = 0 |

265 |
, offline = offline_init |

266 |
, isMaster = False |

267 |
, nTags = [] |

268 |
, xMem = 0 |

269 |
, mDsk = T.defReservedDiskRatio |

270 |
, loDsk = mDskToloDsk T.defReservedDiskRatio dsk_t_init |

271 |
, hiCpu = mCpuTohiCpu (T.iPolicyVcpuRatio T.defIPolicy) cpu_t_init |

272 |
, hiSpindles = computeHiSpindles (T.iPolicySpindleRatio T.defIPolicy) |

273 |
spindles_t_init |

274 |
, instSpindles = 0 |

275 |
, utilPool = T.baseUtil |

276 |
, utilLoad = T.zeroUtil |

277 |
, pTags = Map.empty |

278 |
, group = group_init |

279 |
, iPolicy = T.defIPolicy |

280 |
, exclStorage = excl_stor |

281 |
} |

282 | |

-- | Conversion formula from mDsk\/tDsk to loDsk.
mDskToloDsk :: Double -> Double -> Int
mDskToloDsk mval = floor . (mval *)

-- | Conversion formula from mCpu\/tCpu to hiCpu.
mCpuTohiCpu :: Double -> Double -> Int
mCpuTohiCpu mval = floor . (mval *)

-- | Conversion formula from spindles and spindle ratio to hiSpindles.
computeHiSpindles :: Double -> Int -> Double
computeHiSpindles spindle_ratio = (spindle_ratio *) . fromIntegral

294 | |

-- | Changes the index.
--
-- This is used only during the building of the data structures.
setIdx :: Node -> T.Ndx -> Node
setIdx t i = t { idx = i }

-- | Changes the alias.
--
-- This is used only during the building of the data structures.
setAlias :: Node -> String -> Node
setAlias t s = t { alias = s }

-- | Sets the offline attribute.
setOffline :: Node -> Bool -> Node
setOffline t val = t { offline = val }

-- | Sets the master attribute.
setMaster :: Node -> Bool -> Node
setMaster t val = t { isMaster = val }

-- | Sets the node tags attribute.
setNodeTags :: Node -> [String] -> Node
setNodeTags t val = t { nTags = val }

-- | Sets the unaccounted memory.
setXmem :: Node -> Int -> Node
setXmem t val = t { xMem = val }

322 | |

-- | Sets the max disk usage ratio, recomputing the derived low-disk
-- threshold from the node's total disk.
setMdsk :: Node -> Double -> Node
setMdsk t val = t { mDsk = val, loDsk = mDskToloDsk val (tDsk t) }

-- | Sets the max cpu usage ratio. This will update the node's
-- ipolicy, losing sharing (but it should be a seldomly done operation).
setMcpu :: Node -> Double -> Node
setMcpu t val =
  let new_ipol = (iPolicy t) { T.iPolicyVcpuRatio = val }
  in t { hiCpu = mCpuTohiCpu val (tCpu t), iPolicy = new_ipol }

-- | Sets the policy, recomputing the derived cpu and spindle limits.
setPolicy :: T.IPolicy -> Node -> Node
setPolicy pol node =
  node { iPolicy = pol
       , hiCpu = mCpuTohiCpu (T.iPolicyVcpuRatio pol) (tCpu node)
       , hiSpindles = computeHiSpindles (T.iPolicySpindleRatio pol)
                      (tSpindles node)
       }

342 | |

-- | Computes the maximum reserved memory for peers from a peer map.
computeMaxRes :: P.PeerMap -> P.Elem
computeMaxRes = P.maxElem

346 | |

-- | Builds the peer map for a given node.
--
-- For every secondary instance of the node, accumulate per primary
-- node the memory that a failover would require, then derive the
-- reserved memory, its percentage and the N+1 status from the result.
buildPeers :: Node -> Instance.List -> Node
buildPeers t il =
  let failoverMem i_idx =
        let inst = Container.find i_idx il
            mem = if Instance.usesSecMem inst
                    then Instance.mem inst
                    else 0
        in (Instance.pNode inst, mem)
      pmap = P.accumArray (+) (map failoverMem (sList t))
      new_rmem = computeMaxRes pmap
      new_failN1 = fMem t <= new_rmem
      new_prem = fromIntegral new_rmem / tMem t
  in t { peers = pmap, failN1 = new_failN1, rMem = new_rmem
       , pRem = new_prem }

362 | |

-- | Calculate the new spindle usage. With exclusive storage, spindle
-- usage is tracked via free spindles instead, so this is always zero.
calcSpindleUse ::
  Bool -- Action: True = adding instance, False = removing it
  -> Node -> Instance.Instance -> Double
calcSpindleUse _ (Node { exclStorage = True }) _ = 0.0
calcSpindleUse act n@(Node { exclStorage = False }) i =
  adjust (Instance.usesLocalStorage i) (instSpindles n)
         (fromIntegral $ Instance.spindleUse i)
  where
    adjust :: Bool -> Double -> Double -> Double -- avoid monomorphism restriction
    adjust = if act then incIf else decIf

374 | |

-- | Calculate the new number of free spindles. Only meaningful with
-- exclusive storage; otherwise free spindles are not tracked (0).
calcNewFreeSpindles ::
  Bool -- Action: True = adding instance, False = removing
  -> Node -> Instance.Instance -> Int
calcNewFreeSpindles _ (Node { exclStorage = False }) _ = 0
calcNewFreeSpindles act n@(Node { exclStorage = True }) i =
  case Instance.getTotalSpindles i of
    Nothing -> if act
                 then -1 -- Force a spindle error, so the instance don't go here
                 else fSpindles n -- No change, as we aren't sure
    Just s -> (if act then (-) else (+)) (fSpindles n) s

386 | |

-- | Assigns an instance to a node as primary and update the used VCPU
-- count, utilisation data and tags map.
setPri :: Node -> Instance.Instance -> Node
setPri t inst = t { pList = Instance.idx inst : pList t
                  , uCpu = new_count
                  , pCpu = fromIntegral new_count / tCpu t
                  , utilLoad = utilLoad t `T.addUtil` Instance.util inst
                  , pTags = addTags (pTags t) (Instance.exclTags inst)
                  , instSpindles = calcSpindleUse True t inst
                  }
  -- Offline instances do not consume VCPUs.
  where new_count = Instance.applyIfOnline inst (+ Instance.vcpus inst)
                    (uCpu t)

399 | |

-- | Assigns an instance to a node as secondary and updates disk utilisation.
setSec :: Node -> Instance.Instance -> Node
setSec t inst = t { sList = Instance.idx inst : sList t
                  , utilLoad = load { T.dskWeight = T.dskWeight load +
                                      T.dskWeight (Instance.util inst) }
                  , instSpindles = calcSpindleUse True t inst
                  }
  where load = utilLoad t

408 | |

-- | Computes the new 'pDsk' value, handling nodes without local disk
-- storage (total == 0): we consider all their disk unused.
computePDsk :: Int -> Double -> Double
computePDsk _    0     = 1
computePDsk free total = fromIntegral free / total

414 | |

-- | Computes the new 'pDsk' value, handling the exclusive storage
-- state: with exclusive storage the ratio is based on spindles,
-- otherwise on disk space.
computeNewPDsk :: Node -> Int -> Int -> Double
computeNewPDsk node new_free_sp new_free_dsk
  | exclStorage node = computePDsk new_free_sp . fromIntegral $
                       tSpindles node
  | otherwise        = computePDsk new_free_dsk $ tDsk node

421 | |

422 |
-- * Update functions |

423 | |

-- | Sets the free memory, recomputing the N+1 status and the free
-- memory percentage.
setFmem :: Node -> Int -> Node
setFmem t new_mem =
  t { fMem = new_mem
    , failN1 = new_mem < rMem t
    , pMem = fromIntegral new_mem / tMem t
    }

430 | |

-- | Removes a primary instance, returning memory, disk, spindles and
-- CPU to the node and recomputing all derived ratios.
removePri :: Node -> Instance.Instance -> Node
removePri t inst =
  let iname = Instance.idx inst
      i_online = Instance.notOffline inst
      uses_disk = Instance.usesLocalStorage inst
      new_plist = delete iname (pList t)
      -- memory/CPU are only returned for online instances, disk only
      -- for instances with local storage
      new_mem = incIf i_online (fMem t) (Instance.mem inst)
      new_dsk = incIf uses_disk (fDsk t) (Instance.dsk inst)
      new_free_sp = calcNewFreeSpindles False t inst
      new_inst_sp = calcSpindleUse False t inst
      new_mp = fromIntegral new_mem / tMem t
      new_dp = computeNewPDsk t new_free_sp new_dsk
      new_failn1 = new_mem <= rMem t
      new_ucpu = decIf i_online (uCpu t) (Instance.vcpus inst)
      new_rcpu = fromIntegral new_ucpu / tCpu t
      new_load = utilLoad t `T.subUtil` Instance.util inst
  in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
       , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
       , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
       , pTags = delTags (pTags t) (Instance.exclTags inst)
       , instSpindles = new_inst_sp, fSpindles = new_free_sp
       }

454 | |

-- | Removes a secondary instance, returning disk and spindles to the
-- node and updating the peer map and the reserved-memory figures.
removeSec :: Node -> Instance.Instance -> Node
removeSec t inst =
  let iname = Instance.idx inst
      uses_disk = Instance.usesLocalStorage inst
      pnode = Instance.pNode inst
      new_slist = delete iname (sList t)
      new_dsk = incIf uses_disk (fDsk t) (Instance.dsk inst)
      new_free_sp = calcNewFreeSpindles False t inst
      new_inst_sp = calcSpindleUse False t inst
      old_peers = peers t
      old_peem = P.find pnode old_peers
      new_peem = decIf (Instance.usesSecMem inst) old_peem
                 (Instance.mem inst)
      new_peers = if new_peem > 0
                    then P.add pnode new_peem old_peers
                    else P.remove pnode old_peers
      -- the reserved memory changes only if this peer was the one
      -- defining the current maximum
      old_rmem = rMem t
      new_rmem = if old_peem < old_rmem
                   then old_rmem
                   else computeMaxRes new_peers
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = fMem t <= new_rmem
      new_dp = computeNewPDsk t new_free_sp new_dsk
      old_load = utilLoad t
      new_load = old_load { T.dskWeight = T.dskWeight old_load -
                                          T.dskWeight (Instance.util inst) }
  in t { sList = new_slist, fDsk = new_dsk, peers = new_peers
       , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
       , pRem = new_prem, utilLoad = new_load
       , instSpindles = new_inst_sp, fSpindles = new_free_sp
       }

487 | |

-- | Adds a primary instance (basic version, with strict checks).
addPri :: Node -> Instance.Instance -> T.OpResult Node
addPri = addPriEx False

491 | |

-- | Adds a primary instance (extended version).
addPriEx :: Bool -- ^ Whether to override the N+1 and
                 -- other /soft/ checks, useful if we
                 -- come from a worse status
                 -- (e.g. offline)
         -> Node -- ^ The target node
         -> Instance.Instance -- ^ The instance to add
         -> T.OpResult Node -- ^ The result of the operation,
                            -- either the new version of the node
                            -- or a failure mode
addPriEx force t inst =
  let iname = Instance.idx inst
      i_online = Instance.notOffline inst
      uses_disk = Instance.usesLocalStorage inst
      cur_dsk = fDsk t
      new_mem = decIf i_online (fMem t) (Instance.mem inst)
      new_dsk = decIf uses_disk cur_dsk (Instance.dsk inst)
      new_free_sp = calcNewFreeSpindles True t inst
      new_inst_sp = calcSpindleUse True t inst
      new_failn1 = new_mem <= rMem t
      new_ucpu = incIf i_online (uCpu t) (Instance.vcpus inst)
      new_pcpu = fromIntegral new_ucpu / tCpu t
      new_dp = computeNewPDsk t new_free_sp new_dsk
      l_cpu = T.iPolicyVcpuRatio $ iPolicy t
      new_load = utilLoad t `T.addUtil` Instance.util inst
      inst_tags = Instance.exclTags inst
      old_tags = pTags t
      strict = not force
  -- Hard failures (out of memory/disk/spindles, tag conflicts) are
  -- always rejected; threshold/N+1/CPU-ratio violations are soft and
  -- only rejected in strict mode.
  in case () of
       _ | new_mem <= 0 -> Bad T.FailMem
         | uses_disk && new_dsk <= 0 -> Bad T.FailDisk
         | uses_disk && new_dsk < loDsk t && strict -> Bad T.FailDisk
         | uses_disk && exclStorage t && new_free_sp < 0 ->
             Bad T.FailSpindles
         | uses_disk && new_inst_sp > hiSpindles t && strict ->
             Bad T.FailDisk
         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
         | l_cpu >= 0 && l_cpu < new_pcpu && strict -> Bad T.FailCPU
         | rejectAddTags old_tags inst_tags -> Bad T.FailTags
         | otherwise ->
             let new_plist = iname : pList t
                 new_mp = fromIntegral new_mem / tMem t
                 r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
                       , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
                       , uCpu = new_ucpu, pCpu = new_pcpu
                       , utilLoad = new_load
                       , pTags = addTags old_tags inst_tags
                       , instSpindles = new_inst_sp
                       , fSpindles = new_free_sp
                       }
             in Ok r

541 | |

-- | Adds a secondary instance (basic version, with strict checks).
addSec :: Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSec = addSecEx False

545 | |

-- | Adds a secondary instance (extended version). The extra node
-- index is the primary node of the instance, used to update the peer
-- map; the boolean overrides the soft checks as in 'addPriEx'.
addSecEx :: Bool -> Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSecEx force t inst pdx =
  let iname = Instance.idx inst
      old_peers = peers t
      old_mem = fMem t
      new_dsk = fDsk t - Instance.dsk inst
      new_free_sp = calcNewFreeSpindles True t inst
      new_inst_sp = calcSpindleUse True t inst
      secondary_needed_mem = if Instance.usesSecMem inst
                               then Instance.mem inst
                               else 0
      new_peem = P.find pdx old_peers + secondary_needed_mem
      new_peers = P.add pdx new_peem old_peers
      new_rmem = max (rMem t) new_peem
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = old_mem <= new_rmem
      new_dp = computeNewPDsk t new_free_sp new_dsk
      old_load = utilLoad t
      new_load = old_load { T.dskWeight = T.dskWeight old_load +
                                          T.dskWeight (Instance.util inst) }
      strict = not force
  in case () of
       _ | not (Instance.hasSecondary inst) -> Bad T.FailDisk
         | new_dsk <= 0 -> Bad T.FailDisk
         | new_dsk < loDsk t && strict -> Bad T.FailDisk
         | exclStorage t && new_free_sp < 0 -> Bad T.FailSpindles
         | new_inst_sp > hiSpindles t && strict -> Bad T.FailDisk
         | secondary_needed_mem >= old_mem && strict -> Bad T.FailMem
         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
         | otherwise ->
             let new_slist = iname : sList t
                 r = t { sList = new_slist, fDsk = new_dsk
                       , peers = new_peers, failN1 = new_failn1
                       , rMem = new_rmem, pDsk = new_dp
                       , pRem = new_prem, utilLoad = new_load
                       , instSpindles = new_inst_sp
                       , fSpindles = new_free_sp
                       }
             in Ok r

586 | |

587 |
-- * Stats functions |

588 | |

-- | Computes the amount of available disk on a given node: free disk
-- above the reserved low-disk threshold, never negative.
availDisk :: Node -> Int
availDisk t = max 0 (fDsk t - loDsk t)

-- | Computes the amount of used disk on a given node.
iDsk :: Node -> Int
iDsk t = truncate (tDsk t) - fDsk t

-- | Computes the amount of available memory on a given node: free
-- memory above the peer-failover reservation, never negative.
availMem :: Node -> Int
availMem t = max 0 (fMem t - rMem t)

-- | Computes the amount of available virtual CPUs on a given node:
-- the high-cpu threshold minus the used VCPUs, never negative.
availCpu :: Node -> Int
availCpu t = max 0 (hiCpu t - uCpu t)

-- | The memory used by instances on a given node.
iMem :: Node -> Int
iMem t = truncate (tMem t) - nMem t - xMem t - fMem t

623 | |

624 |
-- * Node graph functions |

625 |
-- These functions do the transformations needed so that nodes can be |

626 |
-- represented as a graph connected by the instances that are replicated |

627 |
-- on them. |

628 | |

629 |
-- * Making of a Graph from a node/instance list |

630 | |

-- | Transform an instance into a list of edges on the node graph:
-- both directions between its primary and secondary node, or nothing
-- if it has no secondary.
instanceToEdges :: Instance.Instance -> [Graph.Edge]
instanceToEdges i
  | Instance.hasSecondary i = [(pnode, snode), (snode, pnode)]
  | otherwise = []
  where pnode = Instance.pNode i
        snode = Instance.sNode i

-- | Transform the list of instances into list of destination edges.
instancesToEdges :: Instance.List -> [Graph.Edge]
instancesToEdges = concatMap instanceToEdges . Container.elems

-- | Transform the list of nodes into vertices bounds.
-- Returns Nothing if the list is empty.
nodesToBounds :: List -> Maybe Graph.Bounds
nodesToBounds nl = liftM2 (,) nmin nmax
  where nmin = fmap (fst . fst) (IntMap.minViewWithKey nl)
        nmax = fmap (fst . fst) (IntMap.maxViewWithKey nl)

-- | The clique of the primary nodes of the instances with a given secondary.
-- Return the full graph of those nodes that are primary node of at least one
-- instance that has the given node as secondary.
nodeToSharedSecondaryEdge :: Instance.List -> Node -> [Graph.Edge]
nodeToSharedSecondaryEdge il n = (,) <$> primaries <*> primaries
  where primaries = map (Instance.pNode . flip Container.find il) $ sList n

-- | Predicate of an edge having both vertices in a set of nodes.
filterValid :: List -> [Graph.Edge] -> [Graph.Edge]
filterValid nl = filter $ \(x, y) -> IntMap.member x nl && IntMap.member y nl

661 | |

-- | Transform a Node + Instance list into a NodeGraph type.
-- Returns Nothing if the node list is empty.
mkNodeGraph :: List -> Instance.List -> Maybe Graph.Graph
mkNodeGraph nl il =
  liftM (`Graph.buildG` (filterValid nl . instancesToEdges $ il))
        (nodesToBounds nl)

-- | Transform a Nodes + Instances into a NodeGraph with all reboot exclusions.
-- This includes edges between nodes that are the primary nodes of instances
-- that have the same secondary node. Nodes not in the node list will not be
-- part of the graph, but they are still considered for the edges arising from
-- two instances having the same secondary node.
-- Return Nothing if the node list is empty.
mkRebootNodeGraph :: List -> List -> Instance.List -> Maybe Graph.Graph
mkRebootNodeGraph allnodes nl il =
  liftM (`Graph.buildG` filterValid nl edges) (nodesToBounds nl)
  where
    edges = instancesToEdges il `union`
            (Container.elems allnodes >>= nodeToSharedSecondaryEdge il)

681 | |

682 |
-- * Display functions |

683 | |

-- | Return a field for a given node.
showField :: Node -- ^ Node which we're querying
          -> String -- ^ Field name
          -> String -- ^ Field value as string
showField t field =
  case field of
    "idx" -> printf "%4d" $ idx t
    "name" -> alias t
    "fqdn" -> name t
    -- status flag: offline, N+1-failed, or OK
    "status" -> case () of
                  _ | offline t -> "-"
                    | failN1 t -> "*"
                    | otherwise -> " "
    "tmem" -> printf "%5.0f" $ tMem t
    "nmem" -> printf "%5d" $ nMem t
    "xmem" -> printf "%5d" $ xMem t
    "fmem" -> printf "%5d" $ fMem t
    "imem" -> printf "%5d" $ iMem t
    "rmem" -> printf "%5d" $ rMem t
    "amem" -> printf "%5d" $ fMem t - rMem t
    -- disk sizes are displayed in GiB
    "tdsk" -> printf "%5.0f" $ tDsk t / 1024
    "fdsk" -> printf "%5d" $ fDsk t `div` 1024
    "tcpu" -> printf "%4.0f" $ tCpu t
    "ucpu" -> printf "%4d" $ uCpu t
    "pcnt" -> printf "%3d" $ length (pList t)
    "scnt" -> printf "%3d" $ length (sList t)
    "plist" -> show $ pList t
    "slist" -> show $ sList t
    "pfmem" -> printf "%6.4f" $ pMem t
    "pfdsk" -> printf "%6.4f" $ pDsk t
    "rcpu" -> printf "%5.2f" $ pCpu t
    "cload" -> printf "%5.3f" uC
    "mload" -> printf "%5.3f" uM
    "dload" -> printf "%5.3f" uD
    "nload" -> printf "%5.3f" uN
    "ptags" -> intercalate "," . map (uncurry (printf "%s=%d")) .
               Map.toList $ pTags t
    "peermap" -> show $ peers t
    "spindle_count" -> show $ tSpindles t
    "hi_spindles" -> show $ hiSpindles t
    "inst_spindles" -> show $ instSpindles t
    _ -> T.unknownField
  where
    T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
                T.dskWeight = uD, T.netWeight = uN } = utilLoad t

729 | |

-- | Returns the header and numeric property of a field.
showHeader :: String -> (String, Bool)
showHeader field =
  case field of
    "idx" -> ("Index", True)
    "name" -> ("Name", False)
    "fqdn" -> ("Name", False)
    "status" -> ("F", False)
    "tmem" -> ("t_mem", True)
    "nmem" -> ("n_mem", True)
    "xmem" -> ("x_mem", True)
    "fmem" -> ("f_mem", True)
    "imem" -> ("i_mem", True)
    "rmem" -> ("r_mem", True)
    "amem" -> ("a_mem", True)
    "tdsk" -> ("t_dsk", True)
    "fdsk" -> ("f_dsk", True)
    "tcpu" -> ("pcpu", True)
    "ucpu" -> ("vcpu", True)
    "pcnt" -> ("pcnt", True)
    "scnt" -> ("scnt", True)
    "plist" -> ("primaries", True)
    "slist" -> ("secondaries", True)
    "pfmem" -> ("p_fmem", True)
    "pfdsk" -> ("p_fdsk", True)
    "rcpu" -> ("r_cpu", True)
    "cload" -> ("lCpu", True)
    "mload" -> ("lMem", True)
    "dload" -> ("lDsk", True)
    "nload" -> ("lNet", True)
    "ptags" -> ("PrimaryTags", False)
    "peermap" -> ("PeerMap", False)
    "spindle_count" -> ("NodeSpindles", True)
    "hi_spindles" -> ("MaxSpindles", True)
    "inst_spindles" -> ("InstSpindles", True)
    -- TODO: add node fields (group.uuid, group)
    _ -> (T.unknownField, False)

767 | |

-- | String converter for the node list functionality.
list :: [String] -> Node -> [String]
list fields t = map (showField t) fields

-- | Constant holding the fields we're displaying by default.
defaultFields :: [String]
defaultFields =
  [ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
  , "rmem", "tdsk", "fdsk", "tcpu", "ucpu", "pcnt", "scnt"
  , "pfmem", "pfdsk", "rcpu"
  , "cload", "mload", "dload", "nload" ]

779 | |

{-# ANN computeGroups "HLint: ignore Use alternative" #-}
-- | Split a list of nodes into a list of (node group UUID, list of
-- associated nodes).
computeGroups :: [Node] -> [(T.Gdx, [Node])]
computeGroups nodes =
  let grouped = groupBy ((==) `on` group) $ sortBy (comparing group) nodes
      -- use of head here is OK, since groupBy returns non-empty lists; if
      -- you remove groupBy, also remove use of head
  in map (\nl -> (group (head nl), nl)) grouped