{-| Module describing a node.

    All updates are functional (copy-based) and return a new node with
    updated value.
-}

{-

Copyright (C) 2009 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.HTools.Node
    ( Node(failN1, name, idx,
           tMem, nMem, fMem, rMem, xMem,
           tDsk, fDsk,
           tCpu, uCpu,
           pMem, pDsk, pRem, pCpu,
           mDsk, mCpu, loDsk, hiCpu,
           pList, sList, offline)
    , List
    -- * Constructor
    , create
    -- ** Finalization after data loading
    , buildPeers
    , setIdx
    , setName
    , setOffline
    , setXmem
    , setFmem
    , setPri
    , setSec
    , setMdsk
    , setMcpu
    , addCpus
    -- * Instance (re)location
    , removePri
    , removeSec
    , addPri
    , addSec
    -- * Stats
    , availDisk
    -- * Formatting
    , list
    -- * Misc stuff
    , AssocList
    , noSecondary
    ) where

import Data.List
import Text.Printf (printf)

import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.PeerMap as PeerMap

import qualified Ganeti.HTools.Types as T

-- * Type declarations

-- | The node type.
data Node = Node { name  :: String -- ^ The node name
                 , tMem :: Double  -- ^ Total memory (MiB)
                 , nMem :: Int     -- ^ Node memory (MiB)
                 , fMem :: Int     -- ^ Free memory (MiB)
                 , xMem :: Int     -- ^ Unaccounted memory (MiB)
                 , tDsk :: Double  -- ^ Total disk space (MiB)
                 , fDsk :: Int     -- ^ Free disk space (MiB)
                 , tCpu :: Double  -- ^ Total CPU count
                 , uCpu :: Int     -- ^ Used VCPU count
                 , pList :: [T.Idx]-- ^ List of primary instance indices
                 , sList :: [T.Idx]-- ^ List of secondary instance indices
                 , idx :: T.Ndx    -- ^ Internal index for book-keeping
                 , peers :: PeerMap.PeerMap -- ^ Pnode to instance mapping
                 , failN1:: Bool   -- ^ Whether the node has failed n1
                 , rMem :: Int     -- ^ Maximum memory needed for
                                   -- failover by primaries of this node
                 , pMem :: Double  -- ^ Percent of free memory
                 , pDsk :: Double  -- ^ Percent of free disk
                 , pRem :: Double  -- ^ Percent of reserved memory
                 , pCpu :: Double  -- ^ Ratio of virtual to physical CPUs
                 , mDsk :: Double  -- ^ Minimum free disk ratio
                 , mCpu :: Double  -- ^ Max ratio of virt-to-phys CPUs
                 , loDsk :: Int    -- ^ Autocomputed from mDsk low disk
                                   -- threshold
                 , hiCpu :: Int    -- ^ Autocomputed from mCpu high cpu
                                   -- threshold
                 , offline :: Bool -- ^ Whether the node should not be used
                                   -- for allocations and skipped from
                                   -- score computations
  } deriving (Show)

instance T.Element Node where
    nameOf = name
    idxOf = idx
    setName = setName
    setIdx = setIdx

-- | A simple name for the int, node association list.
type AssocList = [(T.Ndx, Node)]

-- | A simple name for a node map.
type List = Container.Container Node

-- | Constant node index for a non-moveable instance.
noSecondary :: T.Ndx
noSecondary = -1

-- | No limit value
noLimit :: Double
noLimit = -1

-- | No limit int value
noLimitInt :: Int
noLimitInt = -1

-- * Initialization functions

-- | Create a new node.
--
-- The index and the peers maps are empty, and will need to be
-- updated later via the 'setIdx' and 'buildPeers' functions.
create :: String -> Double -> Int -> Int -> Double
       -> Int -> Double -> Bool -> Node
create name_init mem_t_init mem_n_init mem_f_init
       dsk_t_init dsk_f_init cpu_t_init offline_init =
    Node
    {
      name  = name_init,
      tMem = mem_t_init,
      nMem = mem_n_init,
      fMem = mem_f_init,
      tDsk = dsk_t_init,
      fDsk = dsk_f_init,
      tCpu = cpu_t_init,
      uCpu = 0,
      pList = [],
      sList = [],
      failN1 = True,
      idx = -1,
      peers = PeerMap.empty,
      rMem = 0,
      pMem = fromIntegral mem_f_init / mem_t_init,
      pDsk = fromIntegral dsk_f_init / dsk_t_init,
      pRem = 0,
      pCpu = 0,
      offline = offline_init,
      xMem = 0,
      mDsk = noLimit,
      mCpu = noLimit,
      loDsk = noLimitInt,
      hiCpu = noLimitInt
    }
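
-- A usage sketch (not part of the original module; the values are made
-- up): a node with 4 GiB total memory, 512 MiB used by the node itself,
-- 3 GiB free, 100 GiB total / 50 GiB free disk and 4 physical CPUs.
-- The percentages are derived on creation, while idx and peers keep
-- their placeholder values until 'setIdx' and 'buildPeers' run.
--
-- > let n = create "node1.example.com" 4096 512 3072 102400 51200 4 False
-- > pMem n   -- 0.75
-- > failN1 n -- True, until buildPeers computes the real reservation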

-- | Changes the index.
--
-- This is used only during the building of the data structures.
setIdx :: Node -> T.Ndx -> Node
setIdx t i = t {idx = i}

-- | Changes the name.
--
-- This is used only during the building of the data structures.
setName :: Node -> String -> Node
setName t s = t {name = s}

-- | Sets the offline attribute.
setOffline :: Node -> Bool -> Node
setOffline t val = t { offline = val }

-- | Sets the unaccounted memory.
setXmem :: Node -> Int -> Node
setXmem t val = t { xMem = val }

-- | Sets the max disk usage ratio
setMdsk :: Node -> Double -> Node
setMdsk t val = t { mDsk = val,
                    loDsk = if val == noLimit
                             then noLimitInt
                             else floor (val * tDsk t) }

-- | Sets the max cpu usage ratio
setMcpu :: Node -> Double -> Node
setMcpu t val = t { mCpu = val, hiCpu = floor (val * tCpu t) }
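
-- A hedged numeric illustration (not in the original source) of how the
-- thresholds are derived from the ratios; the node values are the made-up
-- ones from the 'create' sketch above (tDsk = 102400 MiB, tCpu = 4):
--
-- > loDsk (setMdsk n 0.1) -- floor (0.1 * 102400) == 10240
-- > hiCpu (setMcpu n 2.0) -- floor (2.0 * 4)      == 8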

-- | Computes the maximum reserved memory for peers from a peer map.
computeMaxRes :: PeerMap.PeerMap -> PeerMap.Elem
computeMaxRes = PeerMap.maxElem

-- | Builds the peer map for a given node.
buildPeers :: Node -> Instance.List -> Node
buildPeers t il =
    let mdata = map
                (\i_idx -> let inst = Container.find i_idx il
                           in (Instance.pNode inst, Instance.mem inst))
                (sList t)
        pmap = PeerMap.accumArray (+) mdata
        new_rmem = computeMaxRes pmap
        new_failN1 = fMem t <= new_rmem
        new_prem = fromIntegral new_rmem / tMem t
    in t {peers=pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}
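
-- A worked example (added for clarity; the numbers are hypothetical):
-- if this node is secondary for two instances whose primary is node 3,
-- using 1024 and 2048 MiB, and one instance of primary node 5 using
-- 512 MiB, the peer map accumulates to {3 -> 3072, 5 -> 512}, rMem is
-- its maximum (3072), and failN1 is set when the node's free memory
-- cannot cover that reservation (fMem <= 3072).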

-- | Assigns an instance to a node as primary without other updates.
setPri :: Node -> T.Idx -> Node
setPri t ix = t { pList = ix:pList t }

-- | Assigns an instance to a node as secondary without other updates.
setSec :: Node -> T.Idx -> Node
setSec t ix = t { sList = ix:sList t }

-- | Add primary cpus to a node
addCpus :: Node -> Int -> Node
addCpus t count =
    let new_count = uCpu t + count
    in t { uCpu = new_count, pCpu = fromIntegral new_count / tCpu t }

-- * Update functions

-- | Sets the free memory.
setFmem :: Node -> Int -> Node
setFmem t new_mem =
    let new_n1 = new_mem <= rMem t
        new_mp = fromIntegral new_mem / tMem t
    in
      t { fMem = new_mem, failN1 = new_n1, pMem = new_mp }

-- | Removes a primary instance.
removePri :: Node -> Instance.Instance -> Node
removePri t inst =
    let iname = Instance.idx inst
        new_plist = delete iname (pList t)
        new_mem = fMem t + Instance.mem inst
        new_dsk = fDsk t + Instance.dsk inst
        new_mp = fromIntegral new_mem / tMem t
        new_dp = fromIntegral new_dsk / tDsk t
        new_failn1 = new_mem <= rMem t
        new_ucpu = uCpu t - Instance.vcpus inst
        new_rcpu = fromIntegral new_ucpu / tCpu t
    in t {pList = new_plist, fMem = new_mem, fDsk = new_dsk,
          failN1 = new_failn1, pMem = new_mp, pDsk = new_dp,
          uCpu = new_ucpu, pCpu = new_rcpu}

-- | Removes a secondary instance.
removeSec :: Node -> Instance.Instance -> Node
removeSec t inst =
    let iname = Instance.idx inst
        pnode = Instance.pNode inst
        new_slist = delete iname (sList t)
        new_dsk = fDsk t + Instance.dsk inst
        old_peers = peers t
        old_peem = PeerMap.find pnode old_peers
        new_peem =  old_peem - Instance.mem inst
        new_peers = PeerMap.add pnode new_peem old_peers
        old_rmem = rMem t
        new_rmem = if old_peem < old_rmem then
                       old_rmem
                   else
                       computeMaxRes new_peers
        new_prem = fromIntegral new_rmem / tMem t
        new_failn1 = fMem t <= new_rmem
        new_dp = fromIntegral new_dsk / tDsk t
    in t {sList = new_slist, fDsk = new_dsk, peers = new_peers,
          failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp,
          pRem = new_prem}
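
-- Note added for clarity (not in the original source): the conditional
-- above avoids rescanning the peer map in the common case.  If the
-- removed instance's primary node was reserving strictly less than the
-- current maximum (old_peem < old_rmem), the maximum cannot change;
-- only when that peer held the maximum does computeMaxRes recompute it
-- over the updated map.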

-- | Adds a primary instance.
addPri :: Node -> Instance.Instance -> T.OpResult Node
addPri t inst =
    let iname = Instance.idx inst
        new_mem = fMem t - Instance.mem inst
        new_dsk = fDsk t - Instance.dsk inst
        new_failn1 = new_mem <= rMem t
        new_ucpu = uCpu t + Instance.vcpus inst
        new_pcpu = fromIntegral new_ucpu / tCpu t
        new_dp = fromIntegral new_dsk / tDsk t
        l_cpu = mCpu t
    in if new_mem <= 0 then T.OpFail T.FailMem
       else if new_dsk <= 0 || mDsk t > new_dp then T.OpFail T.FailDisk
       else if new_failn1 && not (failN1 t) then T.OpFail T.FailMem
       else if l_cpu >= 0 && l_cpu < new_pcpu then T.OpFail T.FailCPU
       else
           let new_plist = iname:pList t
               new_mp = fromIntegral new_mem / tMem t
               r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk,
                       failN1 = new_failn1, pMem = new_mp, pDsk = new_dp,
                       uCpu = new_ucpu, pCpu = new_pcpu }
           in T.OpGood r
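
-- A minimal caller-side sketch (not part of this module; 'node' and
-- 'inst' are assumed to be an existing Node and Instance.Instance):
-- allocation results are pattern-matched on the 'T.OpResult' wrapper,
-- with the failure constructors mirroring the checks above.
--
-- > case addPri node inst of
-- >   T.OpGood node' -> Just node' -- placement accepted, use updated node
-- >   T.OpFail _     -> Nothing    -- T.FailMem, T.FailDisk or T.FailCPU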

-- | Adds a secondary instance.
addSec :: Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSec t inst pdx =
    let iname = Instance.idx inst
        old_peers = peers t
        old_mem = fMem t
        new_dsk = fDsk t - Instance.dsk inst
        new_peem = PeerMap.find pdx old_peers + Instance.mem inst
        new_peers = PeerMap.add pdx new_peem old_peers
        new_rmem = max (rMem t) new_peem
        new_prem = fromIntegral new_rmem / tMem t
        new_failn1 = old_mem <= new_rmem
        new_dp = fromIntegral new_dsk / tDsk t
    in if new_dsk <= 0 || mDsk t > new_dp then T.OpFail T.FailDisk
       else if new_failn1 && not (failN1 t) then T.OpFail T.FailMem
       else let new_slist = iname:sList t
                r = t { sList = new_slist, fDsk = new_dsk,
                        peers = new_peers, failN1 = new_failn1,
                        rMem = new_rmem, pDsk = new_dp,
                        pRem = new_prem }
           in T.OpGood r
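
-- A hedged usage note (not in the original source): 'pdx' is the index
-- of the instance's primary node, so the reservation is charged to that
-- peer on this node.  For example, with hypothetical values, after
--
-- > addSec node inst 3
--
-- the peer map entry for node 3 grows by Instance.mem inst, and rMem
-- rises to that entry if it becomes the new maximum.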

-- * Stats functions

-- | Computes the amount of available disk on a given node
availDisk :: Node -> Int
availDisk t =
    let _f = fDsk t
        _l = loDsk t
    in
      if _l == noLimitInt
      then _f
      else if _f < _l
           then 0
           else _f - _l
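
-- A short worked example (illustrative numbers, not from the source):
-- continuing the sketch above with fDsk = 51200 and tDsk = 102400 MiB,
-- a 0.1 minimum free disk ratio gives loDsk = 10240, so
--
-- > availDisk (setMdsk n 0.1) -- 51200 - 10240 == 40960
--
-- while with no limit set (loDsk == noLimitInt) the full 51200 MiB
-- would be reported.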

-- * Display functions

-- | String converter for the node list functionality.
list :: Int -> Node -> String
list mname t =
    let pl = length $ pList t
        sl = length $ sList t
        mp = pMem t
        dp = pDsk t
        cp = pCpu t
        off = offline t
        fn = failN1 t
        tmem = tMem t
        nmem = nMem t
        xmem = xMem t
        fmem = fMem t
        imem = truncate tmem - nmem - xmem - fmem
    in
      if off
         then printf " - %-*s %57s %3d %3d"
              mname (name t) "" pl sl
         else
             printf " %c %-*s %5.0f %5d %5d %5d %5d %5d %5.0f %5d\
                    \ %4.0f %4d %3d %3d %6.4f %6.4f %5.2f"
                 (if off then '-' else if fn then '*' else ' ')
                 mname (name t) tmem nmem imem xmem fmem (rMem t)
                 (tDsk t / 1024) (fDsk t `div` 1024)
                 (tCpu t) (uCpu t)
                 pl sl mp dp cp