Statistics
| Branch: | Tag: | Revision:

root / htools / Ganeti / HTools / Node.hs @ f87c9f5d

History | View | Annotate | Download (21.5 kB)

1
{-| Module describing a node.
2

    
3
    All updates are functional (copy-based) and return a new node with
4
    updated value.
5
-}
6

    
7
{-
8

    
9
Copyright (C) 2009, 2010, 2011, 2012 Google Inc.
10

    
11
This program is free software; you can redistribute it and/or modify
12
it under the terms of the GNU General Public License as published by
13
the Free Software Foundation; either version 2 of the License, or
14
(at your option) any later version.
15

    
16
This program is distributed in the hope that it will be useful, but
17
WITHOUT ANY WARRANTY; without even the implied warranty of
18
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19
General Public License for more details.
20

    
21
You should have received a copy of the GNU General Public License
22
along with this program; if not, write to the Free Software
23
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
24
02110-1301, USA.
25

    
26
-}
27

    
28
module Ganeti.HTools.Node
  ( Node(..)
  , List
  -- * Constructor
  , create
  -- ** Finalization after data loading
  , buildPeers
  , setIdx
  , setAlias
  , setOffline
  , setXmem
  , setFmem
  , setPri
  , setSec
  , setMdsk
  , setMcpu
  , setPolicy
  -- * Tag maps
  , addTags
  , delTags
  , rejectAddTags
  -- * Instance (re)location
  , removePri
  , removeSec
  , addPri
  , addPriEx
  , addSec
  , addSecEx
  -- * Stats
  , availDisk
  , availMem
  , availCpu
  , iMem
  , iDsk
  , conflictingPrimaries
  -- * Formatting
  , defaultFields
  , showHeader
  , showField
  , list
  -- * Misc stuff
  , AssocList
  , AllocElement
  , noSecondary
  , computeGroups
  ) where
74

    
75
import Data.List hiding (group)
76
import qualified Data.Map as Map
77
import qualified Data.Foldable as Foldable
78
import Data.Ord (comparing)
79
import Text.Printf (printf)
80

    
81
import qualified Ganeti.HTools.Container as Container
82
import qualified Ganeti.HTools.Instance as Instance
83
import qualified Ganeti.HTools.PeerMap as P
84

    
85
import qualified Ganeti.HTools.Types as T
86

    
87
-- * Type declarations
88

    
89
-- | The tag map type: a map from an instance tag to the number of
-- primary instances on this node carrying that tag.
type TagMap = Map.Map String Int
91

    
92
-- | The node type.
--
-- Besides the static data loaded from the cluster, the record holds
-- a number of fields (@pMem@, @pDsk@, @loDsk@, @hiCpu@, …) that are
-- derived from the others and kept in sync by the update functions.
data Node = Node
  { name     :: String    -- ^ The node name
  , alias    :: String    -- ^ The shortened name (for display purposes)
  , tMem     :: Double    -- ^ Total memory (MiB)
  , nMem     :: Int       -- ^ Node memory (MiB)
  , fMem     :: Int       -- ^ Free memory (MiB)
  , xMem     :: Int       -- ^ Unaccounted memory (MiB)
  , tDsk     :: Double    -- ^ Total disk space (MiB)
  , fDsk     :: Int       -- ^ Free disk space (MiB)
  , tCpu     :: Double    -- ^ Total CPU count
  , uCpu     :: Int       -- ^ Used VCPU count
  , spindleCount :: Int   -- ^ Node spindles (spindle_count node parameter)
  , pList    :: [T.Idx]   -- ^ List of primary instance indices
  , sList    :: [T.Idx]   -- ^ List of secondary instance indices
  , idx      :: T.Ndx     -- ^ Internal index for book-keeping
  , peers    :: P.PeerMap -- ^ Pnode to instance mapping
  , failN1   :: Bool      -- ^ Whether the node has failed n1
  , rMem     :: Int       -- ^ Maximum memory needed for failover by
                          -- primaries of this node
  , pMem     :: Double    -- ^ Percent of free memory
  , pDsk     :: Double    -- ^ Percent of free disk
  , pRem     :: Double    -- ^ Percent of reserved memory
  , pCpu     :: Double    -- ^ Ratio of virtual to physical CPUs
  , mDsk     :: Double    -- ^ Minimum free disk ratio
  , loDsk    :: Int       -- ^ Autocomputed from mDsk low disk
                          -- threshold
  , hiCpu    :: Int       -- ^ Autocomputed from mCpu high cpu
                          -- threshold
  , hiSpindles :: Double  -- ^ Auto-computed from policy spindle_ratio
                          -- and the node spindle count
  , instSpindles :: Double -- ^ Spindles used by instances
  , offline  :: Bool      -- ^ Whether the node should not be used for
                          -- allocations and skipped from score
                          -- computations
  , utilPool :: T.DynUtil -- ^ Total utilisation capacity
  , utilLoad :: T.DynUtil -- ^ Sum of instance utilisation
  , pTags    :: TagMap    -- ^ Map of primary instance tags and their count
  , group    :: T.Gdx     -- ^ The node's group (index)
  , iPolicy  :: T.IPolicy -- ^ The instance policy (of the node's group)
  } deriving (Show, Read, Eq)
133

    
134
-- | Nodes are stored in 'Container.Container's; this instance wires
-- the generic name\/index accessors to the corresponding 'Node'
-- fields so the container machinery can look nodes up.
instance T.Element Node where
  nameOf = name
  idxOf = idx
  setAlias = setAlias
  setIdx = setIdx
  allNames n = [name n, alias n]
140

    
141
-- | A simple name for the int, node association list.
type AssocList = [(T.Ndx, Node)]

-- | A simple name for a node map.
type List = Container.Container Node

-- | A simple name for an allocation element (here just for logistic
-- reasons): updated node list, the instance as placed, the nodes it
-- was placed on and the resulting allocation score.
type AllocElement = (List, Instance.Instance, [Node], T.Score)
150

    
151
-- | Constant node index for a non-moveable instance (i.e. an
-- instance that has no secondary node).
noSecondary :: T.Ndx
noSecondary = -1
154

    
155
-- * Helper functions
156

    
157
-- | Add a single tag to a tag map, bumping its count (or inserting
-- it with a count of one if absent).
addTag :: TagMap -> String -> TagMap
addTag tmap tag = Map.insertWith (+) tag 1 tmap
160

    
161
-- | Add multiple tags to a tag map.
addTags :: TagMap -> [String] -> TagMap
addTags tmap tags = foldl' addTag tmap tags
164

    
165
-- | Decrement a tag's count in a tag map, removing the entry
-- entirely when the count would drop to zero.
delTag :: TagMap -> String -> TagMap
delTag tmap tag = Map.update dec tag tmap
  where dec cnt
          | cnt > 1   = Just (cnt - 1)
          | otherwise = Nothing
171

    
172
-- | Remove multiple tags from a tag map.
delTags :: TagMap -> [String] -> TagMap
delTags tmap tags = foldl' delTag tmap tags
175

    
176
-- | Check whether adding any of the given tags to the map would
-- conflict, i.e. whether any of them is already present.
rejectAddTags :: TagMap -> [String] -> Bool
rejectAddTags tmap = any (\tag -> Map.member tag tmap)
179

    
180
-- | Check how many primary instances have conflicting tags. The
-- algorithm to compute this is to sum the count of all tags, then
-- subtract the size of the tag map (since each tag has at least one,
-- non-conflicting instance); this is equivalent to summing the
-- values in the tag map minus one.
conflictingPrimaries :: Node -> Int
conflictingPrimaries node = Foldable.sum tmap - Map.size tmap
  where tmap = pTags node
187

    
188
-- | Increment a base value by a delta, but only when the boolean
-- flag is set; otherwise return the base unchanged.
incIf :: (Num a) => Bool -> a -> a -> a
incIf cond base delta = if cond then base + delta else base
193

    
194
-- | Decrement a base value by a delta, but only when the boolean
-- flag is set; otherwise return the base unchanged.
decIf :: (Num a) => Bool -> a -> a -> a
decIf cond base delta = if cond then base - delta else base
199

    
200
-- * Initialization functions
201

    
202
-- | Create a new node.
--
-- The index and the peers maps are empty, and will need to be
-- updated later via the 'setIdx' and 'buildPeers' functions.
--
-- Arguments, in order: name, total memory, node memory, free memory,
-- total disk, free disk, total cpus, offline flag, spindle count and
-- group index.  Derived fields (percentages, thresholds) are
-- initialised from the inputs and the default instance policy.
create :: String -> Double -> Int -> Int -> Double
       -> Int -> Double -> Bool -> Int -> T.Gdx -> Node
create name_init mem_t_init mem_n_init mem_f_init
       dsk_t_init dsk_f_init cpu_t_init offline_init spindles_init
       group_init =
  Node { name = name_init
       , alias = name_init
       , tMem = mem_t_init
       , nMem = mem_n_init
       , fMem = mem_f_init
       , tDsk = dsk_t_init
       , fDsk = dsk_f_init
       , tCpu = cpu_t_init
       , spindleCount = spindles_init
       , uCpu = 0
       , pList = []
       , sList = []
       , failN1 = True
       , idx = -1
       , peers = P.empty
       , rMem = 0
       , pMem = fromIntegral mem_f_init / mem_t_init
       , pDsk = fromIntegral dsk_f_init / dsk_t_init
       , pRem = 0
       , pCpu = 0
       , offline = offline_init
       , xMem = 0
       , mDsk = T.defReservedDiskRatio
       , loDsk = mDskToloDsk T.defReservedDiskRatio dsk_t_init
       , hiCpu = mCpuTohiCpu (T.iPolicyVcpuRatio T.defIPolicy) cpu_t_init
       , hiSpindles = computeHiSpindles (T.iPolicySpindleRatio T.defIPolicy)
                      spindles_init
       , instSpindles = 0
       , utilPool = T.baseUtil
       , utilLoad = T.zeroUtil
       , pTags = Map.empty
       , group = group_init
       , iPolicy = T.defIPolicy
       }
245

    
246
-- | Conversion formula from mDsk\/tDsk to loDsk: the low-disk
-- threshold is the reserved-ratio share of the total, rounded down.
mDskToloDsk :: Double -> Double -> Int
mDskToloDsk ratio total = floor (ratio * total)
249

    
250
-- | Conversion formula from mCpu\/tCpu to hiCpu: the high-cpu
-- threshold is the vcpu-ratio share of the total, rounded down.
mCpuTohiCpu :: Double -> Double -> Int
mCpuTohiCpu ratio total = floor (ratio * total)
253

    
254
-- | Conversion formula from spindles and spindle ratio to hiSpindles.
computeHiSpindles :: Double -> Int -> Double
computeHiSpindles spindle_ratio = (spindle_ratio *) . fromIntegral
257

    
258
-- | Changes the index.
--
-- This is used only during the building of the data structures.
setIdx :: Node -> T.Ndx -> Node
setIdx node new_idx = node { idx = new_idx }
263

    
264
-- | Changes the alias.
--
-- This is used only during the building of the data structures.
setAlias :: Node -> String -> Node
setAlias node new_alias = node { alias = new_alias }
269

    
270
-- | Sets the offline attribute.
setOffline :: Node -> Bool -> Node
setOffline node flag = node { offline = flag }
273

    
274
-- | Sets the unaccounted memory.
setXmem :: Node -> Int -> Node
setXmem t val = t { xMem = val }
277

    
278
-- | Sets the max disk usage ratio, recomputing the derived low-disk
-- threshold from the node's total disk.
setMdsk :: Node -> Double -> Node
setMdsk node ratio =
  node { mDsk = ratio, loDsk = mDskToloDsk ratio (tDsk node) }
281

    
282
-- | Sets the max cpu usage ratio. This will update the node's
-- ipolicy, losing sharing (but it should be a seldomly done operation).
setMcpu :: Node -> Double -> Node
setMcpu node ratio =
  node { hiCpu = mCpuTohiCpu ratio (tCpu node)
       , iPolicy = (iPolicy node) { T.iPolicyVcpuRatio = ratio }
       }
288

    
289
-- | Sets the policy, recomputing the cpu and spindle thresholds
-- that are derived from it.
setPolicy :: T.IPolicy -> Node -> Node
setPolicy pol node =
  let cpu_hi = mCpuTohiCpu (T.iPolicyVcpuRatio pol) (tCpu node)
      spin_hi = computeHiSpindles (T.iPolicySpindleRatio pol)
                (spindleCount node)
  in node { iPolicy = pol, hiCpu = cpu_hi, hiSpindles = spin_hi }
297

    
298
-- | Computes the maximum reserved memory for peers from a peer map;
-- this is the worst-case failover memory need from any single peer.
computeMaxRes :: P.PeerMap -> P.Elem
computeMaxRes = P.maxElem
301

    
302
-- | Builds the peer map for a given node.
--
-- Walks the node's secondary instances, accumulating per primary
-- node the memory that would land here on failover (only instances
-- that actually use secondary memory count), then refreshes the
-- derived reserved-memory, N+1 and reserved-percentage fields.
buildPeers :: Node -> Instance.List -> Node
buildPeers t il =
  let mdata = map
              (\i_idx -> let inst = Container.find i_idx il
                             -- only failover-capable instances reserve
                             -- memory on this (secondary) node
                             mem = if Instance.usesSecMem inst
                                     then Instance.mem inst
                                     else 0
                         in (Instance.pNode inst, mem))
              (sList t)
      pmap = P.accumArray (+) mdata
      new_rmem = computeMaxRes pmap
      -- N+1 fails when free memory cannot cover the worst-case peer
      new_failN1 = fMem t <= new_rmem
      new_prem = fromIntegral new_rmem / tMem t
  in t {peers=pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}
317

    
318
-- | Assigns an instance to a node as primary and update the used VCPU
-- count, utilisation data and tags map.
--
-- Note: unlike 'addPri' this performs no capacity checks; it is used
-- when loading already-placed instances.
setPri :: Node -> Instance.Instance -> Node
setPri t inst = t { pList = Instance.idx inst:pList t
                  , uCpu = new_count
                  , pCpu = fromIntegral new_count / tCpu t
                  , utilLoad = utilLoad t `T.addUtil` Instance.util inst
                  , pTags = addTags (pTags t) (Instance.tags inst)
                  }
  -- vcpus are only consumed by online instances
  where new_count = Instance.applyIfOnline inst (+ Instance.vcpus inst)
                    (uCpu t)
329

    
330
-- | Assigns an instance to a node as secondary without other updates.
--
-- Only the disk component of the utilisation is accounted, since a
-- secondary only mirrors the instance's disk.
setSec :: Node -> Instance.Instance -> Node
setSec t inst = t { sList = Instance.idx inst:sList t
                  , utilLoad = old_load { T.dskWeight = T.dskWeight old_load +
                                          T.dskWeight (Instance.util inst) }
                  }
  where old_load = utilLoad t
337

    
338
-- * Update functions
339

    
340
-- | Sets the free memory, recomputing the derived N+1 status and the
-- free-memory percentage.
setFmem :: Node -> Int -> Node
setFmem node new_mem =
  node { fMem = new_mem
       , failN1 = new_mem <= rMem node
       , pMem = fromIntegral new_mem / tMem node
       }
346

    
347
-- | Removes a primary instance.
--
-- Returns the instance's memory and vcpus (only if it was online)
-- and disk (only if it used local storage) to the node, subtracts
-- its utilisation and tags, and refreshes all derived fields.
removePri :: Node -> Instance.Instance -> Node
removePri t inst =
  let iname = Instance.idx inst
      i_online = Instance.instanceNotOffline inst
      uses_disk = Instance.usesLocalStorage inst
      new_plist = delete iname (pList t)
      new_mem = incIf i_online (fMem t) (Instance.mem inst)
      new_dsk = incIf uses_disk (fDsk t) (Instance.dsk inst)
      new_mp = fromIntegral new_mem / tMem t
      new_dp = fromIntegral new_dsk / tDsk t
      -- N+1 status is recomputed against the unchanged reservation
      new_failn1 = new_mem <= rMem t
      new_ucpu = decIf i_online (uCpu t) (Instance.vcpus inst)
      new_rcpu = fromIntegral new_ucpu / tCpu t
      new_load = utilLoad t `T.subUtil` Instance.util inst
  in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
       , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
       , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
       , pTags = delTags (pTags t) (Instance.tags inst) }
366

    
367
-- | Removes a secondary instance.
--
-- Frees the mirrored disk, shrinks the peer-map entry of the
-- instance's primary node, and recomputes the reserved memory (with
-- a shortcut: a full 'computeMaxRes' scan is only needed when the
-- removed peer could have been the maximum).
removeSec :: Node -> Instance.Instance -> Node
removeSec t inst =
  let iname = Instance.idx inst
      uses_disk = Instance.usesLocalStorage inst
      cur_dsk = fDsk t
      pnode = Instance.pNode inst
      new_slist = delete iname (sList t)
      new_dsk = incIf uses_disk cur_dsk (Instance.dsk inst)
      old_peers = peers t
      old_peem = P.find pnode old_peers
      new_peem = decIf (Instance.usesSecMem inst) old_peem (Instance.mem inst)
      -- drop the peer entry entirely once nothing is reserved for it
      new_peers = if new_peem > 0
                    then P.add pnode new_peem old_peers
                    else P.remove pnode old_peers
      old_rmem = rMem t
      -- if this peer was strictly below the maximum, the maximum is
      -- unchanged and we can skip the full recomputation
      new_rmem = if old_peem < old_rmem
                   then old_rmem
                   else computeMaxRes new_peers
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = fMem t <= new_rmem
      new_dp = fromIntegral new_dsk / tDsk t
      old_load = utilLoad t
      new_load = old_load { T.dskWeight = T.dskWeight old_load -
                                          T.dskWeight (Instance.util inst) }
  in t { sList = new_slist, fDsk = new_dsk, peers = new_peers
       , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
       , pRem = new_prem, utilLoad = new_load }
395

    
396
-- | Adds a primary instance (basic version): the extended version
-- with the soft checks enforced (force = False).
addPri :: Node -> Instance.Instance -> T.OpResult Node
addPri node inst = addPriEx False node inst
399

    
400
-- | Adds a primary instance (extended version).
--
-- Computes the would-be resource state and then checks it against
-- the hard limits (memory and disk exhaustion) and, unless forced,
-- the soft limits (min-disk ratio, N+1, vcpu ratio); tag conflicts
-- are always rejected.  Guard order matters: hard failures are
-- reported before soft ones.
addPriEx :: Bool               -- ^ Whether to override the N+1 and
                               -- other /soft/ checks, useful if we
                               -- come from a worse status
                               -- (e.g. offline)
         -> Node               -- ^ The target node
         -> Instance.Instance  -- ^ The instance to add
         -> T.OpResult Node    -- ^ The result of the operation,
                               -- either the new version of the node
                               -- or a failure mode
addPriEx force t inst =
  let iname = Instance.idx inst
      i_online = Instance.instanceNotOffline inst
      uses_disk = Instance.usesLocalStorage inst
      cur_dsk = fDsk t
      new_mem = decIf i_online (fMem t) (Instance.mem inst)
      new_dsk = decIf uses_disk cur_dsk (Instance.dsk inst)
      new_failn1 = new_mem <= rMem t
      new_ucpu = incIf i_online (uCpu t) (Instance.vcpus inst)
      new_pcpu = fromIntegral new_ucpu / tCpu t
      new_dp = fromIntegral new_dsk / tDsk t
      l_cpu = T.iPolicyVcpuRatio $ iPolicy t
      new_load = utilLoad t `T.addUtil` Instance.util inst
      inst_tags = Instance.tags inst
      old_tags = pTags t
      strict = not force
  in case () of
       _ | new_mem <= 0 -> T.OpFail T.FailMem
         | uses_disk && new_dsk <= 0 -> T.OpFail T.FailDisk
         | uses_disk && mDsk t > new_dp && strict -> T.OpFail T.FailDisk
         -- only fail N+1 if the node was N+1-compliant before
         | new_failn1 && not (failN1 t) && strict -> T.OpFail T.FailMem
         -- a negative l_cpu means the vcpu ratio is unlimited
         | l_cpu >= 0 && l_cpu < new_pcpu && strict -> T.OpFail T.FailCPU
         | rejectAddTags old_tags inst_tags -> T.OpFail T.FailTags
         | otherwise ->
           let new_plist = iname:pList t
               new_mp = fromIntegral new_mem / tMem t
               r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
                     , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
                     , uCpu = new_ucpu, pCpu = new_pcpu
                     , utilLoad = new_load
                     , pTags = addTags old_tags inst_tags }
           in T.OpGood r
442

    
443
-- | Adds a secondary instance (basic version): the extended version
-- with the soft checks enforced (force = False).
addSec :: Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSec node inst pdx = addSecEx False node inst pdx
446

    
447
-- | Adds a secondary instance (extended version).
--
-- Reserves the mirrored disk, grows the peer-map entry for the given
-- primary node and recomputes the reserved memory and N+1 status.
-- Hard failures (no secondary support, disk exhaustion) are always
-- reported; the soft checks (min-disk ratio, memory, N+1) can be
-- overridden via the force flag.
addSecEx :: Bool -> Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSecEx force t inst pdx =
  let iname = Instance.idx inst
      old_peers = peers t
      old_mem = fMem t
      new_dsk = fDsk t - Instance.dsk inst
      -- only failover-capable instances reserve memory here
      secondary_needed_mem = if Instance.usesSecMem inst
                               then Instance.mem inst
                               else 0
      new_peem = P.find pdx old_peers + secondary_needed_mem
      new_peers = P.add pdx new_peem old_peers
      new_rmem = max (rMem t) new_peem
      new_prem = fromIntegral new_rmem / tMem t
      new_failn1 = old_mem <= new_rmem
      new_dp = fromIntegral new_dsk / tDsk t
      old_load = utilLoad t
      -- only the disk utilisation is mirrored on the secondary
      new_load = old_load { T.dskWeight = T.dskWeight old_load +
                                          T.dskWeight (Instance.util inst) }
      strict = not force
  in case () of
       _ | not (Instance.hasSecondary inst) -> T.OpFail T.FailDisk
         | new_dsk <= 0 -> T.OpFail T.FailDisk
         | mDsk t > new_dp && strict -> T.OpFail T.FailDisk
         | secondary_needed_mem >= old_mem && strict -> T.OpFail T.FailMem
         | new_failn1 && not (failN1 t) && strict -> T.OpFail T.FailMem
         | otherwise ->
           let new_slist = iname:sList t
               r = t { sList = new_slist, fDsk = new_dsk
                     , peers = new_peers, failN1 = new_failn1
                     , rMem = new_rmem, pDsk = new_dp
                     , pRem = new_prem, utilLoad = new_load }
           in T.OpGood r
480

    
481
-- * Stats functions
482

    
483
-- | Computes the amount of available disk on a given node: the free
-- disk above the reserved low-disk threshold, never negative.
availDisk :: Node -> Int
availDisk node = max 0 (fDsk node - loDsk node)
491

    
492
-- | Computes the amount of used disk on a given node (total minus
-- free).
iDsk :: Node -> Int
iDsk node = truncate (tDsk node) - fDsk node
495

    
496
-- | Computes the amount of available memory on a given node: the
-- free memory above the failover reservation, never negative.
availMem :: Node -> Int
availMem node = max 0 (fMem node - rMem node)
504

    
505
-- | Computes the number of available virtual CPUs on a given node:
-- the headroom between the used vcpus and the vcpu-ratio threshold
-- ('hiCpu'), never negative.
availCpu :: Node -> Int
availCpu t =
  let _u = uCpu t
      _l = hiCpu t
  in if _l >= _u
       then _l - _u
       else 0
513

    
514
-- | The memory used by instances on a given node: the total memory
-- minus the node's own, the unaccounted and the free memory.
iMem :: Node -> Int
iMem node = truncate (tMem node) - nMem node - xMem node - fMem node
517

    
518
-- * Display functions
519

    
520
-- | Return a field for a given node.
521
showField :: Node   -- ^ Node which we're querying
522
          -> String -- ^ Field name
523
          -> String -- ^ Field value as string
524
showField t field =
525
  case field of
526
    "idx"  -> printf "%4d" $ idx t
527
    "name" -> alias t
528
    "fqdn" -> name t
529
    "status" -> case () of
530
                  _ | offline t -> "-"
531
                    | failN1 t -> "*"
532
                    | otherwise -> " "
533
    "tmem" -> printf "%5.0f" $ tMem t
534
    "nmem" -> printf "%5d" $ nMem t
535
    "xmem" -> printf "%5d" $ xMem t
536
    "fmem" -> printf "%5d" $ fMem t
537
    "imem" -> printf "%5d" $ iMem t
538
    "rmem" -> printf "%5d" $ rMem t
539
    "amem" -> printf "%5d" $ fMem t - rMem t
540
    "tdsk" -> printf "%5.0f" $ tDsk t / 1024
541
    "fdsk" -> printf "%5d" $ fDsk t `div` 1024
542
    "tcpu" -> printf "%4.0f" $ tCpu t
543
    "ucpu" -> printf "%4d" $ uCpu t
544
    "pcnt" -> printf "%3d" $ length (pList t)
545
    "scnt" -> printf "%3d" $ length (sList t)
546
    "plist" -> show $ pList t
547
    "slist" -> show $ sList t
548
    "pfmem" -> printf "%6.4f" $ pMem t
549
    "pfdsk" -> printf "%6.4f" $ pDsk t
550
    "rcpu"  -> printf "%5.2f" $ pCpu t
551
    "cload" -> printf "%5.3f" uC
552
    "mload" -> printf "%5.3f" uM
553
    "dload" -> printf "%5.3f" uD
554
    "nload" -> printf "%5.3f" uN
555
    "ptags" -> intercalate "," . map (uncurry (printf "%s=%d")) .
556
               Map.toList $ pTags t
557
    "peermap" -> show $ peers t
558
    "spindle_count" -> show $ spindleCount t
559
    "hi_spindles" -> show $ hiSpindles t
560
    "inst_spindles" -> show $ instSpindles t
561
    _ -> T.unknownField
562
  where
563
    T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
564
                T.dskWeight = uD, T.netWeight = uN } = utilLoad t
565

    
566
-- | Returns the header and numeric property of a field (the boolean
-- marks right-aligned numeric columns).
showHeader :: String -> (String, Bool)
showHeader field =
  case field of
    "idx" -> ("Index", True)
    "name" -> ("Name", False)
    "fqdn" -> ("Name", False)
    "status" -> ("F", False)
    "tmem" -> ("t_mem", True)
    "nmem" -> ("n_mem", True)
    "xmem" -> ("x_mem", True)
    "fmem" -> ("f_mem", True)
    "imem" -> ("i_mem", True)
    "rmem" -> ("r_mem", True)
    "amem" -> ("a_mem", True)
    "tdsk" -> ("t_dsk", True)
    "fdsk" -> ("f_dsk", True)
    "tcpu" -> ("pcpu", True)
    "ucpu" -> ("vcpu", True)
    "pcnt" -> ("pcnt", True)
    "scnt" -> ("scnt", True)
    "plist" -> ("primaries", True)
    "slist" -> ("secondaries", True)
    "pfmem" -> ("p_fmem", True)
    "pfdsk" -> ("p_fdsk", True)
    "rcpu"  -> ("r_cpu", True)
    "cload" -> ("lCpu", True)
    "mload" -> ("lMem", True)
    "dload" -> ("lDsk", True)
    "nload" -> ("lNet", True)
    "ptags" -> ("PrimaryTags", False)
    "peermap" -> ("PeerMap", False)
    "spindle_count" -> ("NodeSpindles", True)
    "hi_spindles" -> ("MaxSpindles", True)
    "inst_spindles" -> ("InstSpindles", True)
    -- TODO: add node fields (group.uuid, group)
    _ -> (T.unknownField, False)
603

    
604
-- | String converter for the node list functionality: renders the
-- requested fields of one node, in order.
list :: [String] -> Node -> [String]
list fields node = [showField node f | f <- fields]
607

    
608
-- | Constant holding the fields we're displaying by default; each
-- name must be understood by 'showField' and 'showHeader'.
defaultFields :: [String]
defaultFields =
  [ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
  , "rmem", "tdsk", "fdsk", "tcpu", "ucpu", "pcnt", "scnt"
  , "pfmem", "pfdsk", "rcpu"
  , "cload", "mload", "dload", "nload" ]
615

    
616
-- | Split a list of nodes into a list of (node group index, list of
-- associated nodes) pairs.
computeGroups :: [Node] -> [(T.Gdx, [Node])]
computeGroups nodes =
  let sameGroup a b = group a == group b
      grouped = groupBy sameGroup (sortBy (comparing group) nodes)
  -- head is safe here: groupBy never produces empty sublists
  in map (\grp -> (group (head grp), grp)) grouped