-- src/Ganeti/HTools/Node.hs @ 5cbf7832

{-| Module describing a node.

    All updates are functional (copy-based) and return a new node with
    updated value.
-}

{-

Copyright (C) 2009, 2010, 2011, 2012, 2013 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.Node
  ( Node(..)
  , List
  -- * Constructor
  , create
  -- ** Finalization after data loading
  , buildPeers
  , setIdx
  , setAlias
  , setOffline
  , setXmem
  , setFmem
  , setPri
  , setSec
  , setMaster
  , setNodeTags
  , setMdsk
  , setMcpu
  , setPolicy
  -- * Tag maps
  , addTags
  , delTags
  , rejectAddTags
  -- * Instance (re)location
  , removePri
  , removeSec
  , addPri
  , addPriEx
  , addSec
  , addSecEx
  -- * Stats
  , availDisk
  , availMem
  , availCpu
  , iMem
  , iDsk
  , conflictingPrimaries
  -- * Formatting
  , defaultFields
  , showHeader
  , showField
  , list
  -- * Misc stuff
  , AssocList
  , AllocElement
  , noSecondary
  , computeGroups
  , mkNodeGraph
  , mkRebootNodeGraph
  , haveExclStorage
  ) where

import Control.Monad (liftM, liftM2)
import Control.Applicative ((<$>), (<*>))
import qualified Data.Foldable as Foldable
import Data.Function (on)
import qualified Data.Graph as Graph
import qualified Data.IntMap as IntMap
import Data.List hiding (group)
import qualified Data.Map as Map
import Data.Ord (comparing)
import Text.Printf (printf)

import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.PeerMap as P

import Ganeti.BasicTypes
import qualified Ganeti.HTools.Types as T

-- * Type declarations

-- | The tag map type.
type TagMap = Map.Map String Int

-- | The node type.
data Node = Node
  { name     :: String    -- ^ The node name
  , alias    :: String    -- ^ The shortened name (for display purposes)
  , tMem     :: Double    -- ^ Total memory (MiB)
  , nMem     :: Int       -- ^ Node memory (MiB)
  , fMem     :: Int       -- ^ Free memory (MiB)
  , xMem     :: Int       -- ^ Unaccounted memory (MiB)
  , tDsk     :: Double    -- ^ Total disk space (MiB)
  , fDsk     :: Int       -- ^ Free disk space (MiB)
  , tCpu     :: Double    -- ^ Total CPU count
  , nCpu     :: Int       -- ^ VCPUs used by the node OS
  , uCpu     :: Int       -- ^ Used VCPU count
  , tSpindles :: Int      -- ^ Node spindles (spindle_count node parameter,
                          -- or actual spindles, see note below)
  , fSpindles :: Int      -- ^ Free spindles (see note below)
  , pList    :: [T.Idx]   -- ^ List of primary instance indices
  , sList    :: [T.Idx]   -- ^ List of secondary instance indices
  , idx      :: T.Ndx     -- ^ Internal index for book-keeping
  , peers    :: P.PeerMap -- ^ Pnode to instance mapping
  , failN1   :: Bool      -- ^ Whether the node has failed n1
  , rMem     :: Int       -- ^ Maximum memory needed for failover by
                          -- primaries of this node
  , pMem     :: Double    -- ^ Percent of free memory
  , pDsk     :: Double    -- ^ Percent of free disk
  , pRem     :: Double    -- ^ Percent of reserved memory
  , pCpu     :: Double    -- ^ Ratio of virtual to physical CPUs
  , mDsk     :: Double    -- ^ Minimum free disk ratio
  , loDsk    :: Int       -- ^ Autocomputed from mDsk low disk
                          -- threshold
  , hiCpu    :: Int       -- ^ Autocomputed from mCpu high cpu
                          -- threshold
  , hiSpindles :: Double  -- ^ Limit auto-computed from policy spindle_ratio
                          -- and the node spindle count (see note below)
  , instSpindles :: Double -- ^ Spindles used by instances (see note below)
  , offline  :: Bool      -- ^ Whether the node should not be used for
                          -- allocations and skipped from score
                          -- computations
  , isMaster :: Bool      -- ^ Whether the node is the master node
  , nTags    :: [String]  -- ^ The node tags for this node
  , utilPool :: T.DynUtil -- ^ Total utilisation capacity
  , utilLoad :: T.DynUtil -- ^ Sum of instance utilisation
  , pTags    :: TagMap    -- ^ Primary instance exclusion tags and their count
  , group    :: T.Gdx     -- ^ The node's group (index)
  , iPolicy  :: T.IPolicy -- ^ The instance policy (of the node's group)
  , exclStorage :: Bool   -- ^ Effective value of exclusive_storage
  } deriving (Show, Eq)
{- A note on how we handle spindles

With exclusive storage, spindles are a resource, so we track the number of
spindles still available (fSpindles). This is the only reliable way, as some
spindles could be used outside of Ganeti. When exclusive storage is off,
spindles are a way to represent disk I/O pressure, so we track the amount
used by the instances and compare it against 'hiSpindles', computed from
the instance policy, to avoid policy violations. In both cases we store the
total spindles in 'tSpindles'.
-}
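
-- A small numeric illustration of the note above (added commentary, not
-- from the original sources; values are made up): with exclusive storage
-- and tSpindles = 8, placing an instance that owns 2 spindles drops
-- fSpindles from 8 to 6. Without exclusive storage, the same placement
-- instead adds the instance's spindle_use to instSpindles, which is then
-- compared against hiSpindles.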

instance T.Element Node where
  nameOf = name
  idxOf = idx
  setAlias = setAlias
  setIdx = setIdx
  allNames n = [name n, alias n]

-- | A simple name for the int, node association list.
type AssocList = [(T.Ndx, Node)]

-- | A simple name for a node map.
type List = Container.Container Node

-- | A simple name for an allocation element (here just for logistic
-- reasons).
type AllocElement = (List, Instance.Instance, [Node], T.Score)

-- | Constant node index for a non-moveable instance.
noSecondary :: T.Ndx
noSecondary = -1

-- * Helper functions

-- | Add a tag to a tagmap.
addTag :: TagMap -> String -> TagMap
addTag t s = Map.insertWith (+) s 1 t

-- | Add multiple tags.
addTags :: TagMap -> [String] -> TagMap
addTags = foldl' addTag

-- | Adjust or delete a tag from a tagmap.
delTag :: TagMap -> String -> TagMap
delTag t s = Map.update (\v -> if v > 1
                                 then Just (v-1)
                                 else Nothing)
             s t

-- | Remove multiple tags.
delTags :: TagMap -> [String] -> TagMap
delTags = foldl' delTag

-- | Check if we can add a list of tags to a tagmap.
rejectAddTags :: TagMap -> [String] -> Bool
rejectAddTags t = any (`Map.member` t)
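
-- Illustrative doctest-style examples for the tag-map helpers above (added
-- here as a sketch; the tag names and values are made up):
--
-- >>> addTags Map.empty ["x", "x", "y"]
-- fromList [("x",2),("y",1)]
-- >>> delTags (addTags Map.empty ["x", "x", "y"]) ["x", "y"]
-- fromList [("x",1)]
-- >>> rejectAddTags (addTags Map.empty ["x"]) ["x", "z"]
-- True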

-- | Check how many primary instances have conflicting tags. The
-- algorithm is to sum the counts of all tags, then subtract the size
-- of the tag map (since each tag has at least one non-conflicting
-- instance); this is equivalent to summing, for each tag, its count
-- minus one.
conflictingPrimaries :: Node -> Int
conflictingPrimaries (Node { pTags = t }) = Foldable.sum t - Map.size t
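
-- A sketch of the computation described above (illustrative only, with
-- made-up tags): a primary-tag map where tag "a" occurs three times and
-- "b" once has (3 + 1) - 2 = 2 conflicting primaries.
--
-- >>> let t = addTags Map.empty ["a", "a", "a", "b"]
-- >>> Foldable.sum t - Map.size t
-- 2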

-- | Helper function to increment a base value depending on the passed
-- boolean argument.
incIf :: (Num a) => Bool -> a -> a -> a
incIf True  base delta = base + delta
incIf False base _     = base

-- | Helper function to decrement a base value depending on the passed
-- boolean argument.
decIf :: (Num a) => Bool -> a -> a -> a
decIf True  base delta = base - delta
decIf False base _     = base

-- | Is exclusive storage enabled on any node?
haveExclStorage :: List -> Bool
haveExclStorage nl =
  any exclStorage $ Container.elems nl

-- * Initialization functions

-- | Create a new node.
--
-- The index and the peers maps are empty, and will need to be updated
-- later via the 'setIdx' and 'buildPeers' functions.
create :: String -> Double -> Int -> Int
       -> Double -> Int -> Double -> Int -> Bool
       -> Int -> Int -> T.Gdx -> Bool
       -> Node
create name_init mem_t_init mem_n_init mem_f_init
       dsk_t_init dsk_f_init cpu_t_init cpu_n_init offline_init
       spindles_t_init spindles_f_init group_init excl_stor =
  Node { name = name_init
       , alias = name_init
       , tMem = mem_t_init
       , nMem = mem_n_init
       , fMem = mem_f_init
       , tDsk = dsk_t_init
       , fDsk = dsk_f_init
       , tCpu = cpu_t_init
       , nCpu = cpu_n_init
       , uCpu = cpu_n_init
       , tSpindles = spindles_t_init
       , fSpindles = spindles_f_init
       , pList = []
       , sList = []
       , failN1 = True
       , idx = -1
       , peers = P.empty
       , rMem = 0
       , pMem = fromIntegral mem_f_init / mem_t_init
       , pDsk = if excl_stor
                then computePDsk spindles_f_init $ fromIntegral spindles_t_init
                else computePDsk dsk_f_init dsk_t_init
       , pRem = 0
       , pCpu = fromIntegral cpu_n_init / cpu_t_init
       , offline = offline_init
       , isMaster = False
       , nTags = []
       , xMem = 0
       , mDsk = T.defReservedDiskRatio
       , loDsk = mDskToloDsk T.defReservedDiskRatio dsk_t_init
       , hiCpu = mCpuTohiCpu (T.iPolicyVcpuRatio T.defIPolicy) cpu_t_init
       , hiSpindles = computeHiSpindles (T.iPolicySpindleRatio T.defIPolicy)
                      spindles_t_init
       , instSpindles = 0
       , utilPool = T.baseUtil
       , utilLoad = T.zeroUtil
       , pTags = Map.empty
       , group = group_init
       , iPolicy = T.defIPolicy
       , exclStorage = excl_stor
       }
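
-- Illustrative doctest-style sketch (made-up values): 'pDsk' is derived
-- from free vs. total disk, or from spindles when exclusive storage is
-- enabled.
--
-- >>> pDsk (create "n" 1024 0 1024 2048 1024 1 0 False 1 1 0 False)
-- 0.5
-- >>> pDsk (create "n" 1024 0 1024 2048 1024 1 0 False 4 2 0 True)
-- 0.5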

-- | Conversion formula from mDsk\/tDsk to loDsk.
mDskToloDsk :: Double -> Double -> Int
mDskToloDsk mval = floor . (mval *)

-- | Conversion formula from mCpu\/tCpu to hiCpu.
mCpuTohiCpu :: Double -> Double -> Int
mCpuTohiCpu mval = floor . (mval *)

-- | Conversion formula from spindles and spindle ratio to hiSpindles.
computeHiSpindles :: Double -> Int -> Double
computeHiSpindles spindle_ratio = (spindle_ratio *) . fromIntegral
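
-- Illustrative doctest-style examples for the conversion formulas above
-- (made-up values):
--
-- >>> mDskToloDsk 0.25 102400
-- 25600
-- >>> mCpuTohiCpu 4.0 8
-- 32
-- >>> computeHiSpindles 32.0 4
-- 128.0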

-- | Changes the index.
--
-- This is used only during the building of the data structures.
setIdx :: Node -> T.Ndx -> Node
setIdx t i = t {idx = i}

-- | Changes the alias.
--
-- This is used only during the building of the data structures.
setAlias :: Node -> String -> Node
setAlias t s = t { alias = s }

-- | Sets the offline attribute.
setOffline :: Node -> Bool -> Node
setOffline t val = t { offline = val }

-- | Sets the master attribute.
setMaster :: Node -> Bool -> Node
setMaster t val = t { isMaster = val }

-- | Sets the node tags attribute.
setNodeTags :: Node -> [String] -> Node
setNodeTags t val = t { nTags = val }

-- | Sets the unaccounted memory.
setXmem :: Node -> Int -> Node
setXmem t val = t { xMem = val }

-- | Sets the max disk usage ratio.
setMdsk :: Node -> Double -> Node
setMdsk t val = t { mDsk = val, loDsk = mDskToloDsk val (tDsk t) }

-- | Sets the max cpu usage ratio. This will update the node's
-- ipolicy, losing sharing (but it should be a rarely performed operation).
setMcpu :: Node -> Double -> Node
setMcpu t val =
  let new_ipol = (iPolicy t) { T.iPolicyVcpuRatio = val }
  in t { hiCpu = mCpuTohiCpu val (tCpu t), iPolicy = new_ipol }
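
-- Illustrative doctest-style sketch (made-up values): raising the vcpu
-- ratio to 2.0 on a node with 8 physical CPUs allows 16 used VCPUs.
--
-- >>> let n = create "n" 1024 0 1024 1024 1024 8 0 False 4 4 0 False
-- >>> hiCpu (setMcpu n 2.0)
-- 16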

-- | Sets the policy.
setPolicy :: T.IPolicy -> Node -> Node
setPolicy pol node =
  node { iPolicy = pol
       , hiCpu = mCpuTohiCpu (T.iPolicyVcpuRatio pol) (tCpu node)
       , hiSpindles = computeHiSpindles (T.iPolicySpindleRatio pol)
                      (tSpindles node)
       }

-- | Computes the maximum reserved memory for peers from a peer map.
computeMaxRes :: P.PeerMap -> P.Elem
computeMaxRes = P.maxElem

-- | Builds the peer map for a given node.
buildPeers :: Node -> Instance.List -> Node
buildPeers t il =
  let mdata = map
              (\i_idx -> let inst = Container.find i_idx il
                             mem = if Instance.usesSecMem inst
                                     then Instance.mem inst
                                     else 0
                         in (Instance.pNode inst, mem))
              (sList t)
      pmap = P.accumArray (+) mdata
      new_rmem = computeMaxRes pmap
      new_failN1 = fMem t <= new_rmem
      new_prem = fromIntegral new_rmem / tMem t
  in t {peers=pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}

-- | Calculate the new spindle usage.
calcSpindleUse ::
                  Bool -- Action: True = adding instance, False = removing it
               -> Node -> Instance.Instance -> Double
calcSpindleUse _ (Node {exclStorage = True}) _ = 0.0
calcSpindleUse act n@(Node {exclStorage = False}) i =
  f (Instance.usesLocalStorage i) (instSpindles n)
    (fromIntegral $ Instance.spindleUse i)
    where
      f :: Bool -> Double -> Double -> Double -- avoid monomorphism restriction
      f = if act then incIf else decIf

-- | Calculate the new number of free spindles.
calcNewFreeSpindles ::
                       Bool -- Action: True = adding instance, False = removing
                    -> Node -> Instance.Instance -> Int
calcNewFreeSpindles _ (Node {exclStorage = False}) _ = 0
calcNewFreeSpindles act n@(Node {exclStorage = True}) i =
  case Instance.getTotalSpindles i of
    Nothing -> if act
               then -1 -- Force a spindle error, so the instance doesn't go here
               else fSpindles n -- No change, as we aren't sure
    Just s -> (if act then (-) else (+)) (fSpindles n) s

-- | Assigns an instance to a node as primary and updates the used VCPU
-- count, utilisation data and tags map.
setPri :: Node -> Instance.Instance -> Node
setPri t inst = t { pList = Instance.idx inst:pList t
                  , uCpu = new_count
                  , pCpu = fromIntegral new_count / tCpu t
                  , utilLoad = utilLoad t `T.addUtil` Instance.util inst
                  , pTags = addTags (pTags t) (Instance.exclTags inst)
                  , instSpindles = calcSpindleUse True t inst
                  }
  where new_count = Instance.applyIfOnline inst (+ Instance.vcpus inst)
                    (uCpu t)

-- | Assigns an instance to a node as secondary and updates disk utilisation.
setSec :: Node -> Instance.Instance -> Node
setSec t inst = t { sList = Instance.idx inst:sList t
                  , utilLoad = old_load { T.dskWeight = T.dskWeight old_load +
                                          T.dskWeight (Instance.util inst) }
                  , instSpindles = calcSpindleUse True t inst
                  }
  where old_load = utilLoad t

-- | Computes the new 'pDsk' value, handling nodes without local disk
-- storage (we consider all their disk unused).
computePDsk :: Int -> Double -> Double
computePDsk _    0     = 1
computePDsk free total = fromIntegral free / total

-- | Computes the new 'pDsk' value, handling the exclusive storage state.
computeNewPDsk :: Node -> Int -> Int -> Double
computeNewPDsk node new_free_sp new_free_dsk =
  if exclStorage node
  then computePDsk new_free_sp . fromIntegral $ tSpindles node
  else computePDsk new_free_dsk $ tDsk node
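
-- Illustrative doctest-style sketch (made-up values): a node with no local
-- disk reports a fully-free ratio, otherwise the ratio is free over total.
--
-- >>> computePDsk 0 0
-- 1.0
-- >>> computePDsk 51200 102400
-- 0.5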

-- * Update functions

-- | Sets the free memory.
setFmem :: Node -> Int -> Node
setFmem t new_mem =
  let new_n1 = new_mem < rMem t
      new_mp = fromIntegral new_mem / tMem t
  in t { fMem = new_mem, failN1 = new_n1, pMem = new_mp }

-- | Removes a primary instance.
removePri :: Node -> Instance.Instance -> Node
removePri t inst =
  let iname = Instance.idx inst
      i_online = Instance.notOffline inst
      uses_disk = Instance.usesLocalStorage inst
      new_plist = delete iname (pList t)
      new_mem = incIf i_online (fMem t) (Instance.mem inst)
      new_dsk = incIf uses_disk (fDsk t) (Instance.dsk inst)
      new_free_sp = calcNewFreeSpindles False t inst
      new_inst_sp = calcSpindleUse False t inst
      new_mp = fromIntegral new_mem / tMem t
      new_dp = computeNewPDsk t new_free_sp new_dsk
      new_failn1 = new_mem <= rMem t
      new_ucpu = decIf i_online (uCpu t) (Instance.vcpus inst)
      new_rcpu = fromIntegral new_ucpu / tCpu t
      new_load = utilLoad t `T.subUtil` Instance.util inst
  in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
       , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
       , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
       , pTags = delTags (pTags t) (Instance.exclTags inst)
       , instSpindles = new_inst_sp, fSpindles = new_free_sp
       }

-- | Removes a secondary instance.
removeSec :: Node -> Instance.Instance -> Node
removeSec t inst =
  let iname = Instance.idx inst
      uses_disk = Instance.usesLocalStorage inst
      cur_dsk = fDsk t
      pnode = Instance.pNode inst
      new_slist = delete iname (sList t)
      new_dsk = incIf uses_disk cur_dsk (Instance.dsk inst)
      new_free_sp = calcNewFreeSpindles False t inst
      new_inst_sp = calcSpindleUse False t inst
      old_peers = peers t
      old_peem = P.find pnode old_peers
      new_peem = decIf (Instance.usesSecMem inst) old_peem (Instance.mem inst)
      new_peers = if new_peem > 0
                    then P.add pnode new_peem old_peers
                    else P.remove pnode old_peers
      old_rmem = rMem t
      new_rmem = if old_peem < old_rmem
                   then old_rmem
                   else computeMaxRes new_peers
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = fMem t <= new_rmem
      new_dp = computeNewPDsk t new_free_sp new_dsk
      old_load = utilLoad t
      new_load = old_load { T.dskWeight = T.dskWeight old_load -
                                          T.dskWeight (Instance.util inst) }
  in t { sList = new_slist, fDsk = new_dsk, peers = new_peers
       , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
       , pRem = new_prem, utilLoad = new_load
       , instSpindles = new_inst_sp, fSpindles = new_free_sp
       }

-- | Adds a primary instance (basic version).
addPri :: Node -> Instance.Instance -> T.OpResult Node
addPri = addPriEx False

-- | Adds a primary instance (extended version).
addPriEx :: Bool               -- ^ Whether to override the N+1 and
                               -- other /soft/ checks, useful if we
                               -- come from a worse status
                               -- (e.g. offline)
         -> Node               -- ^ The target node
         -> Instance.Instance  -- ^ The instance to add
         -> T.OpResult Node    -- ^ The result of the operation,
                               -- either the new version of the node
                               -- or a failure mode
addPriEx force t inst =
  let iname = Instance.idx inst
      i_online = Instance.notOffline inst
      uses_disk = Instance.usesLocalStorage inst
      cur_dsk = fDsk t
      new_mem = decIf i_online (fMem t) (Instance.mem inst)
      new_dsk = decIf uses_disk cur_dsk (Instance.dsk inst)
      new_free_sp = calcNewFreeSpindles True t inst
      new_inst_sp = calcSpindleUse True t inst
      new_failn1 = new_mem <= rMem t
      new_ucpu = incIf i_online (uCpu t) (Instance.vcpus inst)
      new_pcpu = fromIntegral new_ucpu / tCpu t
      new_dp = computeNewPDsk t new_free_sp new_dsk
      l_cpu = T.iPolicyVcpuRatio $ iPolicy t
      new_load = utilLoad t `T.addUtil` Instance.util inst
      inst_tags = Instance.exclTags inst
      old_tags = pTags t
      strict = not force
  in case () of
       _ | new_mem <= 0 -> Bad T.FailMem
         | uses_disk && new_dsk <= 0 -> Bad T.FailDisk
         | uses_disk && new_dsk < loDsk t && strict -> Bad T.FailDisk
         | uses_disk && exclStorage t && new_free_sp < 0 -> Bad T.FailSpindles
         | uses_disk && new_inst_sp > hiSpindles t && strict -> Bad T.FailDisk
         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
         | l_cpu >= 0 && l_cpu < new_pcpu && strict -> Bad T.FailCPU
         | rejectAddTags old_tags inst_tags -> Bad T.FailTags
         | otherwise ->
           let new_plist = iname:pList t
               new_mp = fromIntegral new_mem / tMem t
               r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
                     , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
                     , uCpu = new_ucpu, pCpu = new_pcpu
                     , utilLoad = new_load
                     , pTags = addTags old_tags inst_tags
                     , instSpindles = new_inst_sp
                     , fSpindles = new_free_sp
                     }
           in Ok r

-- | Adds a secondary instance (basic version).
addSec :: Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSec = addSecEx False

-- | Adds a secondary instance (extended version).
addSecEx :: Bool -> Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSecEx force t inst pdx =
  let iname = Instance.idx inst
      old_peers = peers t
      old_mem = fMem t
      new_dsk = fDsk t - Instance.dsk inst
      new_free_sp = calcNewFreeSpindles True t inst
      new_inst_sp = calcSpindleUse True t inst
      secondary_needed_mem = if Instance.usesSecMem inst
                               then Instance.mem inst
                               else 0
      new_peem = P.find pdx old_peers + secondary_needed_mem
      new_peers = P.add pdx new_peem old_peers
      new_rmem = max (rMem t) new_peem
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = old_mem <= new_rmem
      new_dp = computeNewPDsk t new_free_sp new_dsk
      old_load = utilLoad t
      new_load = old_load { T.dskWeight = T.dskWeight old_load +
                                          T.dskWeight (Instance.util inst) }
      strict = not force
  in case () of
       _ | not (Instance.hasSecondary inst) -> Bad T.FailDisk
         | new_dsk <= 0 -> Bad T.FailDisk
         | new_dsk < loDsk t && strict -> Bad T.FailDisk
         | exclStorage t && new_free_sp < 0 -> Bad T.FailSpindles
         | new_inst_sp > hiSpindles t && strict -> Bad T.FailDisk
         | secondary_needed_mem >= old_mem && strict -> Bad T.FailMem
         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
         | otherwise ->
           let new_slist = iname:sList t
               r = t { sList = new_slist, fDsk = new_dsk
                     , peers = new_peers, failN1 = new_failn1
                     , rMem = new_rmem, pDsk = new_dp
                     , pRem = new_prem, utilLoad = new_load
                     , instSpindles = new_inst_sp
                     , fSpindles = new_free_sp
                     }
           in Ok r

-- * Stats functions

-- | Computes the amount of available disk on a given node.
availDisk :: Node -> Int
availDisk t =
  let _f = fDsk t
      _l = loDsk t
  in if _f < _l
       then 0
       else _f - _l
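
-- Illustrative doctest-style sketch (made-up values): with 61440 MiB free
-- and a 0.25 minimum free-disk ratio on 102400 MiB total disk, 35840 MiB
-- remain allocatable.
--
-- >>> let n = create "n" 4096 512 3072 102400 61440 8 1 False 1 1 0 False
-- >>> availDisk (setMdsk n 0.25)
-- 35840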

-- | Computes the amount of used disk on a given node.
iDsk :: Node -> Int
iDsk t = truncate (tDsk t) - fDsk t

-- | Computes the amount of available memory on a given node.
availMem :: Node -> Int
availMem t =
  let _f = fMem t
      _l = rMem t
  in if _f < _l
       then 0
       else _f - _l

-- | Computes the amount of available CPU capacity on a given node.
availCpu :: Node -> Int
availCpu t =
  let _u = uCpu t
      _l = hiCpu t
  in if _l >= _u
       then _l - _u
       else 0

-- | The memory used by instances on a given node.
iMem :: Node -> Int
iMem t = truncate (tMem t) - nMem t - xMem t - fMem t
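
-- Illustrative doctest-style sketch (made-up values): 4096 MiB total minus
-- 512 MiB node memory, 256 MiB unaccounted memory and 1024 MiB free memory
-- leaves 2304 MiB attributed to instances.
--
-- >>> let n = create "n" 4096 512 1024 1024 1024 1 0 False 1 1 0 False
-- >>> iMem (setXmem n 256)
-- 2304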

-- * Node graph functions
-- These functions do the transformations needed so that nodes can be
-- represented as a graph connected by the instances that are replicated
-- on them.

-- * Making of a Graph from a node/instance list

-- | Transform an instance into a list of edges on the node graph.
instanceToEdges :: Instance.Instance -> [Graph.Edge]
instanceToEdges i
  | Instance.hasSecondary i = [(pnode,snode), (snode,pnode)]
  | otherwise = []
    where pnode = Instance.pNode i
          snode = Instance.sNode i

-- | Transform the list of instances into a list of destination edges.
instancesToEdges :: Instance.List -> [Graph.Edge]
instancesToEdges = concatMap instanceToEdges . Container.elems

-- | Transform the list of nodes into vertex bounds.
-- Returns Nothing if the list is empty.
nodesToBounds :: List -> Maybe Graph.Bounds
nodesToBounds nl = liftM2 (,) nmin nmax
    where nmin = fmap (fst . fst) (IntMap.minViewWithKey nl)
          nmax = fmap (fst . fst) (IntMap.maxViewWithKey nl)
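
-- Illustrative doctest-style sketch (made-up values): the bounds are the
-- smallest and largest node indices present in the container.
--
-- >>> let n = create "n" 1024 0 1024 1024 1024 1 0 False 1 1 0 False
-- >>> nodesToBounds (IntMap.fromList [(2, n), (5, n)])
-- Just (2,5)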

-- | The clique of the primary nodes of the instances with a given secondary.
-- Returns the full graph of those nodes that are the primary node of at
-- least one instance that has the given node as secondary.
nodeToSharedSecondaryEdge :: Instance.List -> Node -> [Graph.Edge]
nodeToSharedSecondaryEdge il n = (,) <$> primaries <*> primaries
  where primaries = map (Instance.pNode . flip Container.find il) $ sList n

-- | Predicate of an edge having both vertices in a set of nodes.
filterValid :: List -> [Graph.Edge] -> [Graph.Edge]
filterValid nl = filter $ \(x,y) -> IntMap.member x nl && IntMap.member y nl

-- | Transform a Node + Instance list into a NodeGraph type.
-- Returns Nothing if the node list is empty.
mkNodeGraph :: List -> Instance.List -> Maybe Graph.Graph
mkNodeGraph nl il =
  liftM (`Graph.buildG` (filterValid nl . instancesToEdges $ il))
  (nodesToBounds nl)

-- | Transform a node and instance list into a NodeGraph with all reboot
-- exclusions. This includes edges between nodes that are the primary nodes
-- of instances that have the same secondary node. Nodes not in the node
-- list will not be part of the graph, but they are still considered for
-- the edges arising from two instances having the same secondary node.
-- Return Nothing if the node list is empty.
mkRebootNodeGraph :: List -> List -> Instance.List -> Maybe Graph.Graph
mkRebootNodeGraph allnodes nl il =
  liftM (`Graph.buildG` filterValid nl edges) (nodesToBounds nl)
  where
    edges = instancesToEdges il `union`
            (Container.elems allnodes >>= nodeToSharedSecondaryEdge il)

-- * Display functions

-- | Return a field for a given node.
showField :: Node   -- ^ Node which we're querying
          -> String -- ^ Field name
          -> String -- ^ Field value as string
showField t field =
  case field of
    "idx"  -> printf "%4d" $ idx t
    "name" -> alias t
    "fqdn" -> name t
    "status" -> case () of
                  _ | offline t -> "-"
                    | failN1 t -> "*"
                    | otherwise -> " "
    "tmem" -> printf "%5.0f" $ tMem t
    "nmem" -> printf "%5d" $ nMem t
    "xmem" -> printf "%5d" $ xMem t
    "fmem" -> printf "%5d" $ fMem t
    "imem" -> printf "%5d" $ iMem t
    "rmem" -> printf "%5d" $ rMem t
    "amem" -> printf "%5d" $ fMem t - rMem t
    "tdsk" -> printf "%5.0f" $ tDsk t / 1024
    "fdsk" -> printf "%5d" $ fDsk t `div` 1024
    "tcpu" -> printf "%4.0f" $ tCpu t
    "ucpu" -> printf "%4d" $ uCpu t
    "pcnt" -> printf "%3d" $ length (pList t)
    "scnt" -> printf "%3d" $ length (sList t)
    "plist" -> show $ pList t
    "slist" -> show $ sList t
    "pfmem" -> printf "%6.4f" $ pMem t
    "pfdsk" -> printf "%6.4f" $ pDsk t
    "rcpu"  -> printf "%5.2f" $ pCpu t
    "cload" -> printf "%5.3f" uC
    "mload" -> printf "%5.3f" uM
    "dload" -> printf "%5.3f" uD
    "nload" -> printf "%5.3f" uN
    "ptags" -> intercalate "," . map (uncurry (printf "%s=%d")) .
               Map.toList $ pTags t
    "peermap" -> show $ peers t
    "spindle_count" -> show $ tSpindles t
    "hi_spindles" -> show $ hiSpindles t
    "inst_spindles" -> show $ instSpindles t
    _ -> T.unknownField
  where
    T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
                T.dskWeight = uD, T.netWeight = uN } = utilLoad t

-- | Returns the header and numeric property of a field.
showHeader :: String -> (String, Bool)
showHeader field =
  case field of
    "idx" -> ("Index", True)
    "name" -> ("Name", False)
    "fqdn" -> ("Name", False)
    "status" -> ("F", False)
    "tmem" -> ("t_mem", True)
    "nmem" -> ("n_mem", True)
    "xmem" -> ("x_mem", True)
    "fmem" -> ("f_mem", True)
    "imem" -> ("i_mem", True)
    "rmem" -> ("r_mem", True)
    "amem" -> ("a_mem", True)
    "tdsk" -> ("t_dsk", True)
    "fdsk" -> ("f_dsk", True)
    "tcpu" -> ("pcpu", True)
    "ucpu" -> ("vcpu", True)
    "pcnt" -> ("pcnt", True)
    "scnt" -> ("scnt", True)
    "plist" -> ("primaries", True)
    "slist" -> ("secondaries", True)
    "pfmem" -> ("p_fmem", True)
    "pfdsk" -> ("p_fdsk", True)
    "rcpu"  -> ("r_cpu", True)
    "cload" -> ("lCpu", True)
    "mload" -> ("lMem", True)
    "dload" -> ("lDsk", True)
    "nload" -> ("lNet", True)
    "ptags" -> ("PrimaryTags", False)
    "peermap" -> ("PeerMap", False)
    "spindle_count" -> ("NodeSpindles", True)
    "hi_spindles" -> ("MaxSpindles", True)
    "inst_spindles" -> ("InstSpindles", True)
    -- TODO: add node fields (group.uuid, group)
    _ -> (T.unknownField, False)

-- | String converter for the node list functionality.
list :: [String] -> Node -> [String]
list fields t = map (showField t) fields

-- | Constant holding the fields we're displaying by default.
defaultFields :: [String]
defaultFields =
  [ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
  , "rmem", "tdsk", "fdsk", "tcpu", "ucpu", "pcnt", "scnt"
  , "pfmem", "pfdsk", "rcpu"
  , "cload", "mload", "dload", "nload" ]

{-# ANN computeGroups "HLint: ignore Use alternative" #-}
-- | Split a list of nodes into a list of (node group index, list of
-- associated nodes).
computeGroups :: [Node] -> [(T.Gdx, [Node])]
computeGroups nodes =
  let nodes' = sortBy (comparing group) nodes
      nodes'' = groupBy ((==) `on` group) nodes'
  -- use of head here is OK, since groupBy returns non-empty lists; if
  -- you remove groupBy, also remove use of head
  in map (\nl -> (group (head nl), nl)) nodes''
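
-- Illustrative doctest-style sketch (made-up values): three nodes spread
-- over groups 0 and 1 yield one singleton group and one group of two.
--
-- >>> let mk g = create "n" 1024 0 1024 1024 1024 1 0 False 1 1 g False
-- >>> map (\(gdx, ns) -> (gdx, length ns)) (computeGroups [mk 1, mk 0, mk 1])
-- [(0,1),(1,2)]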