1 {-| Implementation of cluster-wide logic.
3 This module holds all pure cluster-logic; I\/O related functionality
4 goes into the /Main/ module for the individual binaries.
10 Copyright (C) 2009, 2010, 2011 Google Inc.
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
29 module Ganeti.HTools.Cluster
37 -- * Generic functions
39 , computeAllocationDelta
40 -- * First phase functions
42 -- * Second phase functions
47 -- * Display functions
50 -- * Balancing functions
59 -- * IAllocator functions
67 -- * Allocation functions
70 -- * Node group functions
76 import qualified Data.IntSet as IntSet
78 import Data.Maybe (fromJust, isNothing)
79 import Data.Ord (comparing)
80 import Text.Printf (printf)
83 import qualified Ganeti.HTools.Container as Container
84 import qualified Ganeti.HTools.Instance as Instance
85 import qualified Ganeti.HTools.Node as Node
86 import qualified Ganeti.HTools.Group as Group
87 import Ganeti.HTools.Types
88 import Ganeti.HTools.Utils
89 import Ganeti.HTools.Compat
90 import qualified Ganeti.OpCodes as OpCodes
94 -- | Allocation\/relocation solution.
--
-- Accumulated over candidate placements: 'asSolution' keeps only the
-- best element found so far (see 'bestAllocElement' and 'concatAllocs').
95 data AllocSolution = AllocSolution
96 { asFailures :: [FailMode] -- ^ Failure counts
97 , asAllocs :: Int -- ^ Good allocation count
98 , asSolution :: Maybe Node.AllocElement -- ^ The actual allocation result
99 , asLog :: [String] -- ^ Informational messages
102 -- | Node evacuation/group change iallocator result type. This result
103 -- type consists of actual opcodes (a restricted subset) that are
104 -- transmitted back to Ganeti.
105 data EvacSolution = EvacSolution
106 { esMoved :: [(Idx, Gdx, [Ndx])] -- ^ Instances moved successfully
107 , esFailed :: [(Idx, String)] -- ^ Instances which were not
-- NOTE(review): the 'String' presumably carries the failure
-- reason -- confirm against the producers of this field
109 , esOpCodes :: [[[OpCodes.OpCode]]] -- ^ List of lists of jobs
112 -- | Allocation results, as used in 'iterateAlloc' and 'tieredAlloc'.
-- NOTE(review): components appear to be failure statistics, updated
-- node/instance lists, remaining instances and per-step cluster
-- statistics -- confirm against the (elided) callers.
113 type AllocResult = (FailStats, Node.List, Instance.List,
114 [Instance.Instance], [CStats])
116 -- | A type denoting the valid allocation mode/pairs.
118 -- For a one-node allocation, this will be a @Left ['Ndx']@,
119 -- whereas for a two-node allocation, this will be a @Right
120 -- [('Ndx', 'Ndx')]@.
121 type AllocNodes = Either [Ndx] [(Ndx, Ndx)]
-- | The starting point for any allocation computation: no failures,
-- no successful allocations, no chosen solution, empty log.
emptyAllocSolution :: AllocSolution
emptyAllocSolution =
  AllocSolution { asFailures = []
                , asAllocs = 0
                , asSolution = Nothing
                , asLog = []
                }
128 -- | The empty evac solution.
-- (initialises 'esMoved' to the empty list; the remaining fields are
-- initialised on the continuation lines of this record expression)
129 emptyEvacSolution :: EvacSolution
130 emptyEvacSolution = EvacSolution { esMoved = []
135 -- | The complete state for the balancing solution.
--
-- Fields: the node list, the instance list, the cluster score of this
-- state, and the placements performed so far (newest first, as they
-- are prepended in 'checkSingleStep').
136 data Table = Table Node.List Instance.List Score [Placement]
137 deriving (Show, Read)
139 -- | Cluster statistics data type.
--
-- Accumulated node by node via 'updateCStats'; see 'emptyCStats' for
-- the zero element.
140 data CStats = CStats { csFmem :: Integer -- ^ Cluster free mem
141 , csFdsk :: Integer -- ^ Cluster free disk
142 , csAmem :: Integer -- ^ Cluster allocatable mem
143 , csAdsk :: Integer -- ^ Cluster allocatable disk
144 , csAcpu :: Integer -- ^ Cluster allocatable cpus
145 , csMmem :: Integer -- ^ Max node allocatable mem
146 , csMdsk :: Integer -- ^ Max node allocatable disk
147 , csMcpu :: Integer -- ^ Max node allocatable cpu
148 , csImem :: Integer -- ^ Instance used mem
149 , csIdsk :: Integer -- ^ Instance used disk
150 , csIcpu :: Integer -- ^ Instance used cpu
151 , csTmem :: Double -- ^ Cluster total mem
152 , csTdsk :: Double -- ^ Cluster total disk
153 , csTcpu :: Double -- ^ Cluster total cpus
154 , csVcpu :: Integer -- ^ Cluster virtual cpus (if
155 -- node pCpu has been set,
157 , csXmem :: Integer -- ^ Unaccounted for mem
158 , csNmem :: Integer -- ^ Node own memory
159 , csScore :: Score -- ^ The cluster score
160 , csNinst :: Int -- ^ The total number of instances
162 deriving (Show, Read)
164 -- | Currently used, possibly to allocate, unallocable.
--
-- The triple returned by 'computeAllocationDelta': resources already
-- in use, resources newly allocated, and resources left unallocated.
165 type AllocStats = (RSpec, RSpec, RSpec)
167 -- * Utility functions
-- | Verifies the N+1 status and returns the nodes that fail it.
verifyN1 :: [Node.Node] -> [Node.Node]
verifyN1 nodes = [ node | node <- nodes, Node.failN1 node ]
173 {-| Computes the pair of bad nodes and instances.
175 The bad node list is computed via a simple 'verifyN1' check, and the
176 bad instance list is the list of primary and secondary instances of
180 computeBadItems :: Node.List -> Instance.List ->
181 ([Node.Node], [Instance.Instance])
182 computeBadItems nl il =
-- only online nodes are checked for N+1 failure
183 let bad_nodes = verifyN1 $ getOnline nl
-- every instance hosted (as primary or secondary) on a bad node
184 bad_instances = map (`Container.find` il) .
186 concatMap (\ n -> Node.sList n ++ Node.pList n) bad_nodes
188 (bad_nodes, bad_instances)
190 -- | Zero-initializer for the CStats type.
-- One literal zero per 'CStats' field, in declaration order.
191 emptyCStats :: CStats
192 emptyCStats = CStats 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
194 -- | Update stats with data from a new node.
--
-- Folds one node's resources into the running cluster statistics;
-- used via @foldl'@ in 'totalResources'.
195 updateCStats :: CStats -> Node.Node -> CStats
196 updateCStats cs node =
197 let CStats { csFmem = x_fmem, csFdsk = x_fdsk,
198 csAmem = x_amem, csAcpu = x_acpu, csAdsk = x_adsk,
199 csMmem = x_mmem, csMdsk = x_mdsk, csMcpu = x_mcpu,
200 csImem = x_imem, csIdsk = x_idsk, csIcpu = x_icpu,
201 csTmem = x_tmem, csTdsk = x_tdsk, csTcpu = x_tcpu,
203 csXmem = x_xmem, csNmem = x_nmem, csNinst = x_ninst
-- allocatable memory on this node: free minus reserved, clamped at zero
206 inc_amem = Node.fMem node - Node.rMem node
207 inc_amem' = if inc_amem > 0 then inc_amem else 0
208 inc_adsk = Node.availDisk node
-- memory used by instances: total minus node, unaccounted and free
209 inc_imem = truncate (Node.tMem node) - Node.nMem node
210 - Node.xMem node - Node.fMem node
211 inc_icpu = Node.uCpu node
212 inc_idsk = truncate (Node.tDsk node) - Node.fDsk node
213 inc_vcpu = Node.hiCpu node
214 inc_acpu = Node.availCpu node
216 in cs { csFmem = x_fmem + fromIntegral (Node.fMem node)
217 , csFdsk = x_fdsk + fromIntegral (Node.fDsk node)
218 , csAmem = x_amem + fromIntegral inc_amem'
219 , csAdsk = x_adsk + fromIntegral inc_adsk
220 , csAcpu = x_acpu + fromIntegral inc_acpu
221 , csMmem = max x_mmem (fromIntegral inc_amem')
222 , csMdsk = max x_mdsk (fromIntegral inc_adsk)
223 , csMcpu = max x_mcpu (fromIntegral inc_acpu)
224 , csImem = x_imem + fromIntegral inc_imem
225 , csIdsk = x_idsk + fromIntegral inc_idsk
226 , csIcpu = x_icpu + fromIntegral inc_icpu
227 , csTmem = x_tmem + Node.tMem node
228 , csTdsk = x_tdsk + Node.tDsk node
229 , csTcpu = x_tcpu + Node.tCpu node
230 , csVcpu = x_vcpu + fromIntegral inc_vcpu
231 , csXmem = x_xmem + fromIntegral (Node.xMem node)
232 , csNmem = x_nmem + fromIntegral (Node.nMem node)
233 , csNinst = x_ninst + length (Node.pList node)
236 -- | Compute the total free disk and memory in the cluster.
-- Folds 'updateCStats' over all nodes, then records the cluster score.
237 totalResources :: Node.List -> CStats
239 let cs = foldl' updateCStats emptyCStats . Container.elems $ nl
240 in cs { csScore = compCV nl }
-- | Compute the delta between two cluster states.
--
-- This is used when doing allocations, to understand better the
-- available cluster resources. The return value is a triple of the
-- currently used values, the delta that was still allocated, and what
-- was left unallocated.
computeAllocationDelta :: CStats -> CStats -> AllocStats
computeAllocationDelta cini cfin =
  let -- resources in use in the initial state
      rini = RSpec (fromIntegral (csIcpu cini))
                   (fromIntegral (csImem cini))
                   (fromIntegral (csIdsk cini))
      -- resources allocated between the initial and final states
      rfin = RSpec (fromIntegral (csIcpu cfin - csIcpu cini))
                   (fromIntegral (csImem cfin - csImem cini))
                   (fromIntegral (csIdsk cfin - csIdsk cini))
      -- resources remaining unallocated in the final state
      un_cpu = fromIntegral (csVcpu cfin - csIcpu cfin) :: Int
      runa = RSpec un_cpu
                   (truncate (csTmem cfin) - fromIntegral (csImem cfin))
                   (truncate (csTdsk cfin) - fromIntegral (csIdsk cfin))
  in (rini, rfin, runa)
263 -- | The names and weights of the individual elements in the CV list.
--
-- Must stay in the same order and of the same length as the metric
-- list produced by 'compDetailedCV'; 'detailedCVWeights' is derived
-- from this list.
264 detailedCVInfo :: [(Double, String)]
265 detailedCVInfo = [ (1, "free_mem_cv")
266 , (1, "free_disk_cv")
268 , (1, "reserved_mem_cv")
269 , (4, "offline_all_cnt")
270 , (16, "offline_pri_cnt")
271 , (1, "vcpu_ratio_cv")
274 , (1, "disk_load_cv")
276 , (2, "pri_tags_score")
-- | The weights used by 'compCVNodes' for each metric, extracted
-- from 'detailedCVInfo'.
detailedCVWeights :: [Double]
detailedCVWeights = [ weight | (weight, _) <- detailedCVInfo ]
283 -- | Compute the detailed cluster metrics (covariances and counts)
-- that together form the cluster score; order must match
-- 'detailedCVInfo'.
284 compDetailedCV :: [Node.Node] -> [Double]
285 compDetailedCV all_nodes =
-- offline nodes are excluded from the covariance metrics but are
-- counted separately below
287 (offline, nodes) = partition Node.offline all_nodes
288 mem_l = map Node.pMem nodes
289 dsk_l = map Node.pDsk nodes
290 -- metric: memory covariance
291 mem_cv = stdDev mem_l
292 -- metric: disk covariance
293 dsk_cv = stdDev dsk_l
294 -- metric: count of instances living on N1 failing nodes
295 n1_score = fromIntegral . sum . map (\n -> length (Node.sList n) +
296 length (Node.pList n)) .
297 filter Node.failN1 $ nodes :: Double
298 res_l = map Node.pRem nodes
299 -- metric: reserved memory covariance
300 res_cv = stdDev res_l
301 -- offline instances metrics
302 offline_ipri = sum . map (length . Node.pList) $ offline
303 offline_isec = sum . map (length . Node.sList) $ offline
304 -- metric: count of instances on offline nodes
305 off_score = fromIntegral (offline_ipri + offline_isec)::Double
306 -- metric: count of primary instances on offline nodes (this
307 -- helps with evacuation/failover of primary instances on
308 -- 2-node clusters with one node offline)
309 off_pri_score = fromIntegral offline_ipri::Double
310 cpu_l = map Node.pCpu nodes
311 -- metric: covariance of vcpu/pcpu ratio
312 cpu_cv = stdDev cpu_l
313 -- metrics: covariance of cpu, memory, disk and network load
314 (c_load, m_load, d_load, n_load) = unzip4 $
316 let DynUtil c1 m1 d1 n1 = Node.utilLoad n
317 DynUtil c2 m2 d2 n2 = Node.utilPool n
318 in (c1/c2, m1/m2, d1/d2, n1/n2)
320 -- metric: conflicting instance count
321 pri_tags_inst = sum $ map Node.conflictingPrimaries nodes
322 pri_tags_score = fromIntegral pri_tags_inst::Double
323 in [ mem_cv, dsk_cv, n1_score, res_cv, off_score, off_pri_score, cpu_cv
324 , stdDev c_load, stdDev m_load , stdDev d_load, stdDev n_load
-- | Compute the /total/ variance: the detailed metrics from
-- 'compDetailedCV' weighted by 'detailedCVWeights' and summed.
compCVNodes :: [Node.Node] -> Double
compCVNodes nodes = sum (zipWith (*) detailedCVWeights (compDetailedCV nodes))
-- | Wrapper over 'compCVNodes' for callers that have a 'Node.List'.
compCV :: Node.List -> Double
compCV nl = compCVNodes (Container.elems nl)
-- | Compute the online nodes of a 'Node.List'.
getOnline :: Node.List -> [Node.Node]
getOnline nl = [ node | node <- Container.elems nl, not (Node.offline node) ]
339 -- * Balancing functions
-- | Compute the best (lowest-score) of two tables. Note that the
-- ordering of the arguments is important: on equal scores the first
-- table wins.
compareTables :: Table -> Table -> Table
compareTables a@(Table _ _ a_cv _) b@(Table _ _ b_cv _)
  | a_cv > b_cv = b
  | otherwise   = a
346 -- | Applies an instance move to a given node list and instance.
--
-- Each clause removes the instance from its current node(s), re-adds
-- it in the new configuration (inside the Maybe monad, so any
-- capacity failure aborts the move) and returns the updated node
-- list, updated instance and the new primary/secondary indices.
-- NOTE(review): the @force_p@/@force_s@ flags presumably relax the
-- capacity checks when the source node is offline -- confirm against
-- 'Node.addPriEx'/'Node.addSecEx'.
347 applyMove :: Node.List -> Instance.Instance
348 -> IMove -> OpResult (Node.List, Instance.Instance, Ndx, Ndx)
-- Failover (f): swap the primary and secondary roles
350 applyMove nl inst Failover =
351 let old_pdx = Instance.pNode inst
352 old_sdx = Instance.sNode inst
353 old_p = Container.find old_pdx nl
354 old_s = Container.find old_sdx nl
355 int_p = Node.removePri old_p inst
356 int_s = Node.removeSec old_s inst
357 force_p = Node.offline old_p
358 new_nl = do -- Maybe monad
359 new_p <- Node.addPriEx force_p int_s inst
360 new_s <- Node.addSec int_p inst old_sdx
361 let new_inst = Instance.setBoth inst old_sdx old_pdx
362 return (Container.addTwo old_pdx new_s old_sdx new_p nl,
363 new_inst, old_sdx, old_pdx)
366 -- Replace the primary (f, r:np, f)
367 applyMove nl inst (ReplacePrimary new_pdx) =
368 let old_pdx = Instance.pNode inst
369 old_sdx = Instance.sNode inst
370 old_p = Container.find old_pdx nl
371 old_s = Container.find old_sdx nl
372 tgt_n = Container.find new_pdx nl
373 int_p = Node.removePri old_p inst
374 int_s = Node.removeSec old_s inst
375 force_p = Node.offline old_p
376 new_nl = do -- Maybe monad
377 -- check that the current secondary can host the instance
378 -- during the migration
379 tmp_s <- Node.addPriEx force_p int_s inst
380 let tmp_s' = Node.removePri tmp_s inst
381 new_p <- Node.addPriEx force_p tgt_n inst
382 new_s <- Node.addSecEx force_p tmp_s' inst new_pdx
383 let new_inst = Instance.setPri inst new_pdx
384 return (Container.add new_pdx new_p $
385 Container.addTwo old_pdx int_p old_sdx new_s nl,
386 new_inst, new_pdx, old_sdx)
389 -- Replace the secondary (r:ns)
390 applyMove nl inst (ReplaceSecondary new_sdx) =
391 let old_pdx = Instance.pNode inst
392 old_sdx = Instance.sNode inst
393 old_s = Container.find old_sdx nl
394 tgt_n = Container.find new_sdx nl
395 int_s = Node.removeSec old_s inst
396 force_s = Node.offline old_s
397 new_inst = Instance.setSec inst new_sdx
398 new_nl = Node.addSecEx force_s tgt_n inst old_pdx >>=
399 \new_s -> return (Container.addTwo new_sdx
400 new_s old_sdx int_s nl,
401 new_inst, old_pdx, new_sdx)
404 -- Replace the secondary and failover (r:np, f)
405 applyMove nl inst (ReplaceAndFailover new_pdx) =
406 let old_pdx = Instance.pNode inst
407 old_sdx = Instance.sNode inst
408 old_p = Container.find old_pdx nl
409 old_s = Container.find old_sdx nl
410 tgt_n = Container.find new_pdx nl
411 int_p = Node.removePri old_p inst
412 int_s = Node.removeSec old_s inst
413 force_s = Node.offline old_s
414 new_nl = do -- Maybe monad
415 new_p <- Node.addPri tgt_n inst
416 new_s <- Node.addSecEx force_s int_p inst new_pdx
417 let new_inst = Instance.setBoth inst new_pdx old_pdx
418 return (Container.add new_pdx new_p $
419 Container.addTwo old_pdx new_s old_sdx int_s nl,
420 new_inst, new_pdx, old_pdx)
423 -- Failover and replace the secondary (f, r:ns)
424 applyMove nl inst (FailoverAndReplace new_sdx) =
425 let old_pdx = Instance.pNode inst
426 old_sdx = Instance.sNode inst
427 old_p = Container.find old_pdx nl
428 old_s = Container.find old_sdx nl
429 tgt_n = Container.find new_sdx nl
430 int_p = Node.removePri old_p inst
431 int_s = Node.removeSec old_s inst
432 force_p = Node.offline old_p
433 new_nl = do -- Maybe monad
434 new_p <- Node.addPriEx force_p int_s inst
435 new_s <- Node.addSecEx force_p tgt_n inst old_sdx
436 let new_inst = Instance.setBoth inst old_sdx new_sdx
437 return (Container.add new_sdx new_s $
438 Container.addTwo old_sdx new_p old_pdx int_p nl,
439 new_inst, old_sdx, new_sdx)
-- | Tries to allocate an instance on one given node.
allocateOnSingle :: Node.List -> Instance.Instance -> Ndx
                 -> OpResult Node.AllocElement
allocateOnSingle nl inst new_pdx =
  let p = Container.find new_pdx nl
      new_inst = Instance.setBoth inst new_pdx Node.noSecondary
  in Node.addPri p inst >>= \new_p -> do
       let new_nl = Container.add new_pdx new_p nl
           -- BUG FIX: score the *updated* node list. The original
           -- computed 'compCV nl' (the unmodified list), which gave
           -- every candidate node an identical score and defeated the
           -- best-solution selection in 'bestAllocElement'; compare
           -- 'allocateOnPair', which already scores the new list.
           new_score = compCV new_nl
       return (new_nl, new_inst, [new_p], new_score)
453 -- | Tries to allocate an instance on a given pair of nodes.
--
-- Fails (in 'OpResult') if either node rejects the instance; on
-- success, the returned score is computed on the updated node list.
454 allocateOnPair :: Node.List -> Instance.Instance -> Ndx -> Ndx
455 -> OpResult Node.AllocElement
456 allocateOnPair nl inst new_pdx new_sdx =
457 let tgt_p = Container.find new_pdx nl
458 tgt_s = Container.find new_sdx nl
460 new_p <- Node.addPri tgt_p inst
461 new_s <- Node.addSec tgt_s inst new_pdx
462 let new_inst = Instance.setBoth inst new_pdx new_sdx
463 new_nl = Container.addTwo new_pdx new_p new_sdx new_s nl
464 return (new_nl, new_inst, [new_p, new_s], compCV new_nl)
466 -- | Tries to perform an instance move and returns the best table
467 -- between the original one and the new one.
468 checkSingleStep :: Table -- ^ The original table
469 -> Instance.Instance -- ^ The instance to move
470 -> Table -- ^ The current best table
471 -> IMove -- ^ The move to apply
472 -> Table -- ^ The final best table
473 checkSingleStep ini_tbl target cur_tbl move =
475 Table ini_nl ini_il _ ini_plc = ini_tbl
476 tmp_resu = applyMove ini_nl target move
-- on a successful move, build the updated table (new placement is
-- prepended) and keep whichever of it and the current best wins in
-- 'compareTables'
480 OpGood (upd_nl, new_inst, pri_idx, sec_idx) ->
481 let tgt_idx = Instance.idx target
482 upd_cvar = compCV upd_nl
483 upd_il = Container.add tgt_idx new_inst ini_il
484 upd_plc = (tgt_idx, pri_idx, sec_idx, move, upd_cvar):ini_plc
485 upd_tbl = Table upd_nl upd_il upd_cvar upd_plc
487 compareTables cur_tbl upd_tbl
489 -- | Given the status of the current secondary as a valid new node and
490 -- the current candidate target node, generate the possible moves for
492 possibleMoves :: Bool -- ^ Whether the secondary node is a valid new node
493 -> Bool -- ^ Whether we can change the primary node
494 -> Ndx -- ^ Target node candidate
495 -> [IMove] -- ^ List of valid result moves
-- instance (primary) moves not allowed: only replacing the secondary
-- is possible
497 possibleMoves _ False tdx =
498 [ReplaceSecondary tdx]
-- full freedom: all move types are candidates
500 possibleMoves True True tdx =
501 [ReplaceSecondary tdx,
502 ReplaceAndFailover tdx,
504 FailoverAndReplace tdx]
-- the secondary is not a valid new node, so moves ending on the
-- current secondary are excluded
506 possibleMoves False True tdx =
507 [ReplaceSecondary tdx,
508 ReplaceAndFailover tdx]
510 -- | Compute the best move for a given instance.
511 checkInstanceMove :: [Ndx] -- ^ Allowed target node indices
512 -> Bool -- ^ Whether disk moves are allowed
513 -> Bool -- ^ Whether instance moves are allowed
514 -> Table -- ^ Original table
515 -> Instance.Instance -- ^ Instance to move
516 -> Table -- ^ Best new table for this instance
517 checkInstanceMove nodes_idx disk_moves inst_moves ini_tbl target =
519 opdx = Instance.pNode target
520 osdx = Instance.sNode target
-- never target the instance's current primary or secondary node
521 nodes = filter (\idx -> idx /= opdx && idx /= osdx) nodes_idx
522 use_secondary = elem osdx nodes_idx && inst_moves
523 aft_failover = if use_secondary -- if allowed to failover
524 then checkSingleStep ini_tbl target ini_tbl Failover
526 all_moves = if disk_moves
528 (possibleMoves use_secondary inst_moves) nodes
531 -- iterate over the possible nodes for this instance
532 foldl' (checkSingleStep ini_tbl target) aft_failover all_moves
534 -- | Compute the best next move.
535 checkMove :: [Ndx] -- ^ Allowed target node indices
536 -> Bool -- ^ Whether disk moves are allowed
537 -> Bool -- ^ Whether instance moves are allowed
538 -> Table -- ^ The current solution
539 -> [Instance.Instance] -- ^ List of instances still to move
540 -> Table -- ^ The new solution
541 checkMove nodes_idx disk_moves inst_moves ini_tbl victims =
542 let Table _ _ _ ini_plc = ini_tbl
543 -- we're using rwhnf from the Control.Parallel.Strategies
544 -- package; we don't need to use rnf as that would force too
545 -- much evaluation in single-threaded cases, and in
546 -- multi-threaded case the weak head normal form is enough to
547 -- spark the evaluation
548 tables = parMap rwhnf (checkInstanceMove nodes_idx disk_moves
551 -- iterate over all instances, computing the best move
552 best_tbl = foldl' compareTables ini_tbl tables
553 Table _ _ _ best_plc = best_tbl
-- an unchanged placement count means no improving move was found
554 in if length best_plc == length ini_plc
555 then ini_tbl -- no advancement
-- | Check if we are allowed to go deeper in the balancing.
doNextBalance :: Table -- ^ The starting table
              -> Int   -- ^ Remaining length (negative means unlimited)
              -> Score -- ^ Score at which to stop
              -> Bool  -- ^ Whether balancing should continue
doNextBalance (Table _ _ ini_cv ini_plc) max_rounds min_score =
  let rounds_left = max_rounds < 0 || length ini_plc < max_rounds
  in rounds_left && ini_cv > min_score
568 -- | Run a balance move.
569 tryBalance :: Table -- ^ The starting table
570 -> Bool -- ^ Allow disk moves
571 -> Bool -- ^ Allow instance moves
572 -> Bool -- ^ Only evacuate moves
573 -> Score -- ^ Min gain threshold
574 -> Score -- ^ Min gain
575 -> Maybe Table -- ^ The resulting table and commands
576 tryBalance ini_tbl disk_moves inst_moves evac_mode mg_limit min_gain =
577 let Table ini_nl ini_il ini_cv _ = ini_tbl
578 all_inst = Container.elems ini_il
-- in evacuation mode, restrict to instances touching offline nodes
579 all_inst' = if evac_mode
580 then let bad_nodes = map Node.idx . filter Node.offline $
581 Container.elems ini_nl
582 in filter (any (`elem` bad_nodes) . Instance.allNodes)
585 reloc_inst = filter Instance.movable all_inst'
586 node_idx = map Node.idx . filter (not . Node.offline) $
587 Container.elems ini_nl
588 fin_tbl = checkMove node_idx disk_moves inst_moves ini_tbl reloc_inst
589 (Table _ _ fin_cv _) = fin_tbl
-- accept the round only if the score improved and the gain is above
-- the threshold (or we are still above the min-gain limit score)
591 if fin_cv < ini_cv && (ini_cv > mg_limit || ini_cv - fin_cv >= min_gain)
592 then Just fin_tbl -- this round made success, return the new table
595 -- * Allocation functions
597 -- | Build failure stats out of a list of failures.
-- Counts, for each failure mode, how many times it occurs in the input.
598 collapseFailures :: [FailMode] -> FailStats
599 collapseFailures flst =
600 map (\k -> (k, foldl' (\a e -> if e == k then a + 1 else a) 0 flst))
-- | Compares two Maybe AllocElement and chooses the best score.
--
-- 'Nothing' never wins against a concrete solution; between two
-- concrete solutions the strictly lower score wins, with ties going
-- to the second argument.
bestAllocElement :: Maybe Node.AllocElement
                 -> Maybe Node.AllocElement
                 -> Maybe Node.AllocElement
bestAllocElement Nothing b = b
bestAllocElement a Nothing = a
bestAllocElement a@(Just (_, _, _, ascore)) b@(Just (_, _, _, bscore))
  | ascore < bscore = a
  | otherwise       = b
612 -- | Update current Allocation solution and failure stats with new
-- candidate result (either a failure reason or a good element).
614 concatAllocs :: AllocSolution -> OpResult Node.AllocElement -> AllocSolution
615 concatAllocs as (OpFail reason) = as { asFailures = reason : asFailures as }
617 concatAllocs as (OpGood ns) =
618 let -- Choose the old or new solution, based on the cluster score
620 osols = asSolution as
621 nsols = bestAllocElement osols (Just ns)
-- 'nsuc' is the updated count of good allocations (see 'asAllocs')
623 -- Note: we force evaluation of nsols here in order to keep the
624 -- memory profile low - we know that we will need nsols for sure
625 -- in the next cycle, so we force evaluation of nsols, since the
626 -- foldl' in the caller will only evaluate the tuple, but not the
627 -- elements of the tuple
628 in nsols `seq` nsuc `seq` as { asAllocs = nsuc, asSolution = nsols }
630 -- | Given a solution, generates a reasonable description for it.
631 describeSolution :: AllocSolution -> String
632 describeSolution as =
633 let fcnt = asFailures as
-- human-readable summary of the non-zero per-mode failure counts
636 intercalate ", " . map (\(a, b) -> printf "%s: %d" (show a) b) .
637 filter ((> 0) . snd) . collapseFailures $ fcnt
639 Nothing -> "No valid allocation solutions, failure reasons: " ++
640 (if null fcnt then "unknown reasons" else freasons)
641 Just (_, _, nodes, cv) ->
642 printf ("score: %.8f, successes %d, failures %d (%s)" ++
643 " for node(s) %s") cv (asAllocs as) (length fcnt) freasons
644 (intercalate "/" . map Node.name $ nodes)
-- | Annotates a solution with its own description message.
annotateSolution :: AllocSolution -> AllocSolution
annotateSolution sol =
  let summary = describeSolution sol
  in sol { asLog = summary : asLog sol }
-- | Reverses an evacuation solution.
--
-- Rationale: we always concat the results to the top of the lists, so
-- for proper jobset execution, we should reverse all lists.
reverseEvacSolution :: EvacSolution -> EvacSolution
reverseEvacSolution es =
  es { esMoved   = reverse (esMoved es)
     , esFailed  = reverse (esFailed es)
     , esOpCodes = reverse (esOpCodes es)
     }
658 -- | Generate the valid node allocation singles or pairs for a new instance.
659 genAllocNodes :: Group.List -- ^ Group list
660 -> Node.List -- ^ The node map
661 -> Int -- ^ The number of nodes required
662 -> Bool -- ^ Whether to drop or not
664 -> Result AllocNodes -- ^ The (monadic) result
665 genAllocNodes gl nl count drop_unalloc =
-- optionally drop nodes whose group does not accept allocations
666 let filter_fn = if drop_unalloc
667 then filter (Group.isAllocable .
668 flip Container.find gl . Node.group)
670 all_nodes = filter_fn $ getOnline nl
-- candidate pairs: distinct nodes belonging to the same group
671 all_pairs = liftM2 (,) all_nodes all_nodes
672 ok_pairs = filter (\(x, y) -> Node.idx x /= Node.idx y &&
673 Node.group x == Node.group y) all_pairs
675 1 -> Ok (Left (map Node.idx all_nodes))
676 2 -> Ok (Right (map (\(p, s) -> (Node.idx p, Node.idx s)) ok_pairs))
677 _ -> Bad "Unsupported number of nodes, only one or two supported"
679 -- | Try to allocate an instance on the cluster.
680 tryAlloc :: (Monad m) =>
681 Node.List -- ^ The node list
682 -> Instance.List -- ^ The instance list
683 -> Instance.Instance -- ^ The instance to allocate
684 -> AllocNodes -- ^ The allocation targets
685 -> m AllocSolution -- ^ Possible solution list
-- two-node allocation: fold over every valid (primary, secondary) pair
686 tryAlloc nl _ inst (Right ok_pairs) =
687 let sols = foldl' (\cstate (p, s) ->
688 concatAllocs cstate $ allocateOnPair nl inst p s
689 ) emptyAllocSolution ok_pairs
691 in if null ok_pairs -- means we have just one node
692 then fail "Not enough online nodes"
693 else return $ annotateSolution sols
-- single-node allocation: fold over every candidate node on its own
695 tryAlloc nl _ inst (Left all_nodes) =
696 let sols = foldl' (\cstate ->
697 concatAllocs cstate . allocateOnSingle nl inst
698 ) emptyAllocSolution all_nodes
700 then fail "No online nodes"
701 else return $ annotateSolution sols
703 -- | Given a group/result, describe it as a nice (list of) messages.
-- A good solution yields one message per log entry, a bad one a
-- single error message; the group name and policy are interpolated.
704 solutionDescription :: Group.List -> (Gdx, Result AllocSolution) -> [String]
705 solutionDescription gl (groupId, result) =
707 Ok solution -> map (printf "Group %s (%s): %s" gname pol) (asLog solution)
708 Bad message -> [printf "Group %s: error %s" gname message]
709 where grp = Container.find groupId gl
710 gname = Group.name grp
711 pol = apolToString (Group.allocPolicy grp)
713 -- | From a list of possibly bad and possibly empty solutions, filter
714 -- only the groups with a valid result. Note that the result will be
715 -- reversed compared to the original list.
716 filterMGResults :: Group.List
717 -> [(Gdx, Result AllocSolution)]
718 -> [(Gdx, AllocSolution)]
719 filterMGResults gl = foldl' fn []
720 where unallocable = not . Group.isAllocable . flip Container.find gl
721 fn accu (gdx, rasol) =
-- keep only 'Ok' results that actually found a solution on an
-- allocable group
724 Ok sol | isNothing (asSolution sol) -> accu
725 | unallocable gdx -> accu
726 | otherwise -> (gdx, sol):accu
-- | Sort multigroup results based on policy and score.
--
-- Sorted ascending on the pair (allocation policy, solution score).
sortMGResults :: Group.List
              -> [(Gdx, AllocSolution)]
              -> [(Gdx, AllocSolution)]
sortMGResults gl = sortBy (comparing solScore)
  where
    solScore (gdx, sol) =
      let policy = Group.allocPolicy (Container.find gdx gl)
          -- 'fromJust' is safe here: callers filter out solutions
          -- without an 'asSolution' (see 'filterMGResults')
          (_, _, _, score) = fromJust (asSolution sol)
      in (policy, score)
738 -- | Finds the best group for an instance on a multi-group cluster.
740 -- Only solutions in @preferred@ and @last_resort@ groups will be
741 -- accepted as valid, and additionally if the allowed groups parameter
742 -- is not null then allocation will only be run for those group
744 findBestAllocGroup :: Group.List -- ^ The group list
745 -> Node.List -- ^ The node list
746 -> Instance.List -- ^ The instance list
747 -> Maybe [Gdx] -- ^ The allowed groups
748 -> Instance.Instance -- ^ The instance to allocate
749 -> Int -- ^ Required number of nodes
750 -> Result (Gdx, AllocSolution, [String])
751 findBestAllocGroup mggl mgnl mgil allowed_gdxs inst cnt =
752 let groups = splitCluster mgnl mgil
753 groups' = maybe groups (\gs -> filter ((`elem` gs) . fst) groups)
-- run the allocation on each (possibly filtered) group separately
755 sols = map (\(gid, (nl, il)) ->
756 (gid, genAllocNodes mggl nl cnt False >>=
757 tryAlloc nl il inst))
758 groups'::[(Gdx, Result AllocSolution)]
759 all_msgs = concatMap (solutionDescription mggl) sols
760 goodSols = filterMGResults mggl sols
761 sortedSols = sortMGResults mggl goodSols
762 in if null sortedSols
763 then Bad $ intercalate ", " all_msgs
764 else let (final_group, final_sol) = head sortedSols
765 in return (final_group, final_sol, all_msgs)
-- | Try to allocate an instance on a multi-group cluster.
tryMGAlloc :: Group.List -- ^ The group list
           -> Node.List -- ^ The node list
           -> Instance.List -- ^ The instance list
           -> Instance.Instance -- ^ The instance to allocate
           -> Int -- ^ Required number of nodes
           -> Result AllocSolution -- ^ Possible solution list
tryMGAlloc mggl mgnl mgil inst cnt =
  findBestAllocGroup mggl mgnl mgil Nothing inst cnt >>=
    \(best_group, solution, all_msgs) ->
      let winner = Group.name $ Container.find best_group mggl
          selmsg = "Selected group: " ++ winner
      in return $ solution { asLog = selmsg : all_msgs }
781 -- | Try to relocate an instance on the cluster.
782 tryReloc :: (Monad m) =>
783 Node.List -- ^ The node list
784 -> Instance.List -- ^ The instance list
785 -> Idx -- ^ The index of the instance to move
786 -> Int -- ^ The number of nodes required
787 -> [Ndx] -- ^ Nodes which should not be used
788 -> m AllocSolution -- ^ Solution list
-- only single-destination relocation (secondary replacement) is
-- supported; see the catch-all clause below
789 tryReloc nl il xid 1 ex_idx =
790 let all_nodes = getOnline nl
791 inst = Container.find xid il
-- the instance's current primary is also excluded as a target
792 ex_idx' = Instance.pNode inst:ex_idx
793 valid_nodes = filter (not . flip elem ex_idx' . Node.idx) all_nodes
794 valid_idxes = map Node.idx valid_nodes
795 sols1 = foldl' (\cstate x ->
798 applyMove nl inst (ReplaceSecondary x)
799 return (mnl, i, [Container.find x mnl],
801 in concatAllocs cstate em
802 ) emptyAllocSolution valid_idxes
805 tryReloc _ _ _ reqn _ = fail $ "Unsupported number of relocation \
806 \destinations required (" ++ show reqn ++
807 "), only one supported"
-- | Function which fails if the requested mode is change secondary.
--
-- This is useful since except DRBD, no other disk template can
-- execute change secondary; thus, we can just call this function
-- instead of always checking for secondary mode. After the call to
-- this function, whatever mode we have is just a primary change.
failOnSecondaryChange :: (Monad m) => EvacMode -> DiskTemplate -> m ()
failOnSecondaryChange mode dt =
  case mode of
    ChangeSecondary -> fail $ "Instances with disk template '" ++
                       dtToString dt ++ "' can't execute change secondary"
    _ -> return ()
821 -- | Run evacuation for a single instance.
823 -- /Note:/ this function should correctly execute both intra-group
824 -- evacuations (in all modes) and inter-group evacuations (in the
825 -- 'ChangeAll' mode). Of course, this requires that the correct list
826 -- of target nodes is passed.
--
-- Dispatch is per disk template: only DRBD8 instances can actually be
-- relocated here; the other templates either fail outright or are
-- not implemented yet.
827 nodeEvacInstance :: Node.List -- ^ The node list (cluster-wide)
828 -> Instance.List -- ^ Instance list (cluster-wide)
829 -> EvacMode -- ^ The evacuation mode
830 -> Instance.Instance -- ^ The instance to be evacuated
831 -> Gdx -- ^ The group we're targetting
832 -> [Ndx] -- ^ The list of available nodes
834 -> Result (Node.List, Instance.List, [OpCodes.OpCode])
835 nodeEvacInstance _ _ mode (Instance.Instance
836 {Instance.diskTemplate = dt@DTDiskless}) _ _ =
837 failOnSecondaryChange mode dt >>
838 fail "Diskless relocations not implemented yet"
840 nodeEvacInstance _ _ _ (Instance.Instance
841 {Instance.diskTemplate = DTPlain}) _ _ =
842 fail "Instances of type plain cannot be relocated"
844 nodeEvacInstance _ _ _ (Instance.Instance
845 {Instance.diskTemplate = DTFile}) _ _ =
846 fail "Instances of type file cannot be relocated"
848 nodeEvacInstance _ _ mode (Instance.Instance
849 {Instance.diskTemplate = dt@DTSharedFile}) _ _ =
850 failOnSecondaryChange mode dt >>
851 fail "Shared file relocations not implemented yet"
853 nodeEvacInstance _ _ mode (Instance.Instance
854 {Instance.diskTemplate = dt@DTBlock}) _ _ =
855 failOnSecondaryChange mode dt >>
856 fail "Block device relocations not implemented yet"
-- DRBD8, primary change: a plain failover plus the matching job
858 nodeEvacInstance nl il ChangePrimary
859 inst@(Instance.Instance {Instance.diskTemplate = DTDrbd8})
862 (nl', inst', _, _) <- opToResult $ applyMove nl inst Failover
863 let idx = Instance.idx inst
864 il' = Container.add idx inst' il
865 ops = iMoveToJob nl' il' idx Failover
866 return (nl', il', ops)
-- DRBD8, secondary change: fold over the available nodes, keeping
-- the best secondary replacement found
868 nodeEvacInstance nl il ChangeSecondary
869 inst@(Instance.Instance {Instance.diskTemplate = DTDrbd8})
872 (nl', inst', _, ndx) <- annotateResult "Can't find any good node" $
874 foldl' (evacDrbdSecondaryInner nl inst gdx)
875 (Left "no nodes available") avail_nodes
876 let idx = Instance.idx inst
877 il' = Container.add idx inst' il
878 ops = iMoveToJob nl' il' idx (ReplaceSecondary ndx)
879 return (nl', il', ops)
881 -- The algorithm for ChangeAll is as follows:
883 -- * generate all (primary, secondary) node pairs for the target groups
884 -- * for each pair, execute the needed moves (r:s, f, r:s) and compute
885 -- the final node list state and group score
886 -- * select the best choice via a foldl that uses the same Either
887 -- String solution as the ChangeSecondary mode
888 nodeEvacInstance nl il ChangeAll
889 inst@(Instance.Instance {Instance.diskTemplate = DTDrbd8})
892 let no_nodes = Left "no nodes available"
893 node_pairs = [(p,s) | p <- avail_nodes, s <- avail_nodes, p /= s]
894 (nl', il', ops, _) <-
895 annotateResult "Can't find any good nodes for relocation" $
898 (\accu nodes -> case evacDrbdAllInner nl il inst gdx nodes of
902 -- we don't need more details (which
903 -- nodes, etc.) as we only selected
904 -- this group if we can allocate on
905 -- it, hence failures will not
906 -- propagate out of this fold loop
907 Left _ -> Left $ "Allocation failed: " ++ msg
908 Ok result@(_, _, _, new_cv) ->
909 let new_accu = Right result in
912 Right (_, _, _, old_cv) ->
916 ) no_nodes node_pairs
918 return (nl', il', ops)
920 -- | Inner fold function for changing secondary of a DRBD instance.
922 -- The running solution is either a @Left String@, which means we
923 -- don't have yet a working solution, or a @Right (...)@, which
924 -- represents a valid solution; it holds the modified node list, the
925 -- modified instance (after evacuation), the score of that solution,
926 -- and the new secondary node index.
927 evacDrbdSecondaryInner :: Node.List -- ^ Cluster node list
928 -> Instance.Instance -- ^ Instance being evacuated
929 -> Gdx -- ^ The group index of the instance
930 -> Either String ( Node.List
933 , Ndx) -- ^ Current best solution
934 -> Ndx -- ^ Node we're evaluating as new secondary
935 -> Either String ( Node.List
938 , Ndx) -- ^ New best solution
939 evacDrbdSecondaryInner nl inst gdx accu ndx =
940 case applyMove nl inst (ReplaceSecondary ndx) of
944 Left _ -> Left $ "Node " ++ Container.nameOf nl ndx ++
945 " failed: " ++ show fm
946 OpGood (nl', inst', _, _) ->
947 let nodes = Container.elems nl'
948 -- The fromJust below is ugly (it can fail nastily), but
            -- at this point we shouldn't have any internal mismatches,
950 -- and adding a monad here would be quite involved
951 grpnodes = fromJust (gdx `lookup` Node.computeGroups nodes)
952 new_cv = compCVNodes grpnodes
953 new_accu = Right (nl', inst', new_cv, ndx)
956 Right (_, _, old_cv, _) ->
961 -- | Compute result of changing all nodes of a DRBD instance.
963 -- Given the target primary and secondary node (which might be in a
964 -- different group or not), this function will 'execute' all the
-- required steps and assuming all operations succeed, will return
966 -- the modified node and instance lists, the opcodes needed for this
967 -- and the new group score.
968 evacDrbdAllInner :: Node.List -- ^ Cluster node list
969 -> Instance.List -- ^ Cluster instance list
970 -> Instance.Instance -- ^ The instance to be moved
971 -> Gdx -- ^ The target group index
972 -- (which can differ from the
973 -- current group of the
975 -> (Ndx, Ndx) -- ^ Tuple of new
976 -- primary\/secondary nodes
977 -> Result (Node.List, Instance.List, [OpCodes.OpCode], Score)
978 evacDrbdAllInner nl il inst gdx (t_pdx, t_sdx) =
980 let primary = Container.find (Instance.pNode inst) nl
981 idx = Instance.idx inst
982 -- if the primary is offline, then we first failover
983 (nl1, inst1, ops1) <-
984 if Node.offline primary
986 (nl', inst', _, _) <-
987 annotateResult "Failing over to the secondary" $
988 opToResult $ applyMove nl inst Failover
989 return (nl', inst', [Failover])
990 else return (nl, inst, [])
991 let (o1, o2, o3) = (ReplaceSecondary t_pdx,
993 ReplaceSecondary t_sdx)
994 -- we now need to execute a replace secondary to the future
996 (nl2, inst2, _, _) <-
997 annotateResult "Changing secondary to new primary" $
999 applyMove nl1 inst1 o1
1001 -- we now execute another failover, the primary stays fixed now
1002 (nl3, inst3, _, _) <- annotateResult "Failing over to new primary" $
1003 opToResult $ applyMove nl2 inst2 o2
1005 -- and finally another replace secondary, to the final secondary
1006 (nl4, inst4, _, _) <-
1007 annotateResult "Changing secondary to final secondary" $
1009 applyMove nl3 inst3 o3
1011 il' = Container.add idx inst4 il
1012 ops = concatMap (iMoveToJob nl4 il' idx) $ reverse ops4
1013 let nodes = Container.elems nl4
1014 -- The fromJust below is ugly (it can fail nastily), but
      -- at this point we shouldn't have any internal mismatches,
1016 -- and adding a monad here would be quite involved
1017 grpnodes = fromJust (gdx `lookup` Node.computeGroups nodes)
1018 new_cv = compCVNodes grpnodes
1019 return (nl4, il', ops, new_cv)
1021 -- | Computes the nodes in a given group which are available for
1023 availableGroupNodes :: [(Gdx, [Ndx])] -- ^ Group index/node index assoc list
1024 -> IntSet.IntSet -- ^ Nodes that are excluded
1025 -> Gdx -- ^ The group for which we
1027 -> Result [Ndx] -- ^ List of available node indices
1028 availableGroupNodes group_nodes excl_ndx gdx = do
1029 local_nodes <- maybe (Bad $ "Can't find group with index " ++ show gdx)
1030 Ok (lookup gdx group_nodes)
1031 let avail_nodes = filter (not . flip IntSet.member excl_ndx) local_nodes
1034 -- | Updates the evac solution with the results of an instance
1036 updateEvacSolution :: (Node.List, Instance.List, EvacSolution)
1038 -> Result (Node.List, Instance.List, [OpCodes.OpCode])
1039 -> (Node.List, Instance.List, EvacSolution)
1040 updateEvacSolution (nl, il, es) idx (Bad msg) =
1041 (nl, il, es { esFailed = (idx, msg):esFailed es})
1042 updateEvacSolution (_, _, es) idx (Ok (nl, il, opcodes)) =
1043 (nl, il, es { esMoved = new_elem:esMoved es
1044 , esOpCodes = [opcodes]:esOpCodes es })
1045 where inst = Container.find idx il
1047 instancePriGroup nl inst,
1048 Instance.allNodes inst)
1050 -- | Node-evacuation IAllocator mode main function.
1051 tryNodeEvac :: Group.List -- ^ The cluster groups
1052 -> Node.List -- ^ The node list (cluster-wide, not per group)
1053 -> Instance.List -- ^ Instance list (cluster-wide)
1054 -> EvacMode -- ^ The evacuation mode
1055 -> [Idx] -- ^ List of instance (indices) to be evacuated
1056 -> Result (Node.List, Instance.List, EvacSolution)
1057 tryNodeEvac _ ini_nl ini_il mode idxs =
1058 let evac_ndx = nodesToEvacuate ini_il mode idxs
1059 offline = map Node.idx . filter Node.offline $ Container.elems ini_nl
1060 excl_ndx = foldl' (flip IntSet.insert) evac_ndx offline
1061 group_ndx = map (\(gdx, (nl, _)) -> (gdx, map Node.idx
1062 (Container.elems nl))) $
1063 splitCluster ini_nl ini_il
1064 (fin_nl, fin_il, esol) =
1065 foldl' (\state@(nl, il, _) inst ->
1066 let gdx = instancePriGroup nl inst
1067 pdx = Instance.pNode inst in
1068 updateEvacSolution state (Instance.idx inst) $
1069 availableGroupNodes group_ndx
1070 (IntSet.insert pdx excl_ndx) gdx >>=
1071 nodeEvacInstance nl il mode inst gdx
1073 (ini_nl, ini_il, emptyEvacSolution)
1074 (map (`Container.find` ini_il) idxs)
1075 in return (fin_nl, fin_il, reverseEvacSolution esol)
1077 -- | Change-group IAllocator mode main function.
1079 -- This is very similar to 'tryNodeEvac', the only difference is that
1080 -- we don't choose as target group the current instance group, but
1083 -- 1. at the start of the function, we compute which are the target
1084 -- groups; either no groups were passed in, in which case we choose
1085 -- all groups out of which we don't evacuate instance, or there were
1086 -- some groups passed, in which case we use those
1088 -- 2. for each instance, we use 'findBestAllocGroup' to choose the
1089 -- best group to hold the instance, and then we do what
1090 -- 'tryNodeEvac' does, except for this group instead of the current
1093 -- Note that the correct behaviour of this function relies on the
1094 -- function 'nodeEvacInstance' to be able to do correctly both
1095 -- intra-group and inter-group moves when passed the 'ChangeAll' mode.
1096 tryChangeGroup :: Group.List -- ^ The cluster groups
1097 -> Node.List -- ^ The node list (cluster-wide)
1098 -> Instance.List -- ^ Instance list (cluster-wide)
1099 -> [Gdx] -- ^ Target groups; if empty, any
1100 -- groups not being evacuated
1101 -> [Idx] -- ^ List of instance (indices) to be evacuated
1102 -> Result (Node.List, Instance.List, EvacSolution)
1103 tryChangeGroup gl ini_nl ini_il gdxs idxs =
1104 let evac_gdxs = nub $ map (instancePriGroup ini_nl .
1105 flip Container.find ini_il) idxs
1106 target_gdxs = (if null gdxs
1107 then Container.keys gl
1108 else gdxs) \\ evac_gdxs
1109 offline = map Node.idx . filter Node.offline $ Container.elems ini_nl
1110 excl_ndx = foldl' (flip IntSet.insert) IntSet.empty offline
1111 group_ndx = map (\(gdx, (nl, _)) -> (gdx, map Node.idx
1112 (Container.elems nl))) $
1113 splitCluster ini_nl ini_il
1114 (fin_nl, fin_il, esol) =
1115 foldl' (\state@(nl, il, _) inst ->
1117 let ncnt = Instance.requiredNodes $
1118 Instance.diskTemplate inst
1119 (gdx, _, _) <- findBestAllocGroup gl nl il
1120 (Just target_gdxs) inst ncnt
1121 av_nodes <- availableGroupNodes group_ndx
1123 nodeEvacInstance nl il ChangeAll inst
1125 in updateEvacSolution state
1126 (Instance.idx inst) solution
1128 (ini_nl, ini_il, emptyEvacSolution)
1129 (map (`Container.find` ini_il) idxs)
1130 in return (fin_nl, fin_il, reverseEvacSolution esol)
1132 -- | Recursively place instances on the cluster until we're out of space.
1133 iterateAlloc :: Node.List
1136 -> Instance.Instance
1138 -> [Instance.Instance]
1140 -> Result AllocResult
1141 iterateAlloc nl il limit newinst allocnodes ixes cstats =
1142 let depth = length ixes
1143 newname = printf "new-%d" depth::String
1144 newidx = length (Container.elems il) + depth
1145 newi2 = Instance.setIdx (Instance.setName newinst newname) newidx
1146 newlimit = fmap (flip (-) 1) limit
1147 in case tryAlloc nl il newi2 allocnodes of
1149 Ok (AllocSolution { asFailures = errs, asSolution = sols3 }) ->
1150 let newsol = Ok (collapseFailures errs, nl, il, ixes, cstats) in
1153 Just (xnl, xi, _, _) ->
1156 else iterateAlloc xnl (Container.add newidx xi il)
1157 newlimit newinst allocnodes (xi:ixes)
1158 (totalResources xnl:cstats)
1160 -- | The core of the tiered allocation mode.
1161 tieredAlloc :: Node.List
1164 -> Instance.Instance
1166 -> [Instance.Instance]
1168 -> Result AllocResult
1169 tieredAlloc nl il limit newinst allocnodes ixes cstats =
1170 case iterateAlloc nl il limit newinst allocnodes ixes cstats of
1172 Ok (errs, nl', il', ixes', cstats') ->
1173 let newsol = Ok (errs, nl', il', ixes', cstats')
1174 ixes_cnt = length ixes'
1175 (stop, newlimit) = case limit of
1176 Nothing -> (False, Nothing)
1177 Just n -> (n <= ixes_cnt,
1178 Just (n - ixes_cnt)) in
1179 if stop then newsol else
1180 case Instance.shrinkByType newinst . fst . last $
1181 sortBy (comparing snd) errs of
1183 Ok newinst' -> tieredAlloc nl' il' newlimit
1184 newinst' allocnodes ixes' cstats'
1186 -- * Formatting functions
1188 -- | Given the original and final nodes, computes the relocation description.
1189 computeMoves :: Instance.Instance -- ^ The instance to be moved
1190 -> String -- ^ The instance name
1191 -> IMove -- ^ The move being performed
1192 -> String -- ^ New primary
1193 -> String -- ^ New secondary
1194 -> (String, [String])
1195 -- ^ Tuple of moves and commands list; moves is containing
1196 -- either @/f/@ for failover or @/r:name/@ for replace
1197 -- secondary, while the command list holds gnt-instance
1198 -- commands (without that prefix), e.g \"@failover instance1@\"
1199 computeMoves i inam mv c d =
1201 Failover -> ("f", [mig])
1202 FailoverAndReplace _ -> (printf "f r:%s" d, [mig, rep d])
1203 ReplaceSecondary _ -> (printf "r:%s" d, [rep d])
1204 ReplaceAndFailover _ -> (printf "r:%s f" c, [rep c, mig])
1205 ReplacePrimary _ -> (printf "f r:%s f" c, [mig, rep c, mig])
1206 where morf = if Instance.running i then "migrate" else "failover"
1207 mig = printf "%s -f %s" morf inam::String
1208 rep n = printf "replace-disks -n %s %s" n inam
1210 -- | Converts a placement to string format.
1211 printSolutionLine :: Node.List -- ^ The node list
1212 -> Instance.List -- ^ The instance list
1213 -> Int -- ^ Maximum node name length
1214 -> Int -- ^ Maximum instance name length
1215 -> Placement -- ^ The current placement
1216 -> Int -- ^ The index of the placement in
1218 -> (String, [String])
1219 printSolutionLine nl il nmlen imlen plc pos =
1221 pmlen = (2*nmlen + 1)
1222 (i, p, s, mv, c) = plc
1223 inst = Container.find i il
1224 inam = Instance.alias inst
1225 npri = Node.alias $ Container.find p nl
1226 nsec = Node.alias $ Container.find s nl
1227 opri = Node.alias $ Container.find (Instance.pNode inst) nl
1228 osec = Node.alias $ Container.find (Instance.sNode inst) nl
1229 (moves, cmds) = computeMoves inst inam mv npri nsec
1230 ostr = printf "%s:%s" opri osec::String
1231 nstr = printf "%s:%s" npri nsec::String
1233 (printf " %3d. %-*s %-*s => %-*s %.8f a=%s"
1234 pos imlen inam pmlen ostr
-- | Return the instance and involved nodes in an instance move.
--
-- Note that the output list length can vary, and is not required nor
-- guaranteed to be of any specific length.
involvedNodes :: Instance.List -- ^ Instance list, used for retrieving
                               -- the instance from its index; note
                               -- that this /must/ be the original
                               -- instance list, so that we can
                               -- retrieve the old nodes
              -> Placement     -- ^ The placement we're investigating,
                               -- containing the new nodes and
                               -- instance index
              -> [Ndx]         -- ^ Resulting list of node indices
involvedNodes il plc =
  let (inst_idx, new_pri, new_sec, _, _) = plc
      inst = Container.find inst_idx il
  -- both the new and the old (pre-move) locations are involved
  in nub $ new_pri : new_sec : Instance.allNodes inst
-- | Inner fold function for 'splitJobs', which either appends the
-- next job to the current jobset (when its nodes are disjoint from
-- the ones already touched), or starts a new jobset.
mergeJobs :: ([JobSet], [Ndx]) -> MoveJob -> ([JobSet], [Ndx])
mergeJobs ([], _) job@(affected, _, _, _) = ([[job]], affected)
mergeJobs (jobsets@(current:older), seen) job@(affected, _, _, _) =
  if null (affected `intersect` seen)
    -- no node overlap with the running jobset, so this job can join it
    then ((job:current):older, affected ++ seen)
    -- conflicting nodes: open a fresh jobset seeded with this job
    else ([job]:jobsets, affected)
-- | Break a list of moves into independent groups. Note that this
-- will reverse the order of jobs.
splitJobs :: [MoveJob] -> [JobSet]
splitJobs = fst . foldl mergeJobs ([], [])
1269 -- | Given a list of commands, prefix them with @gnt-instance@ and
1270 -- also beautify the display a little.
1271 formatJob :: Int -> Int -> (Int, MoveJob) -> [String]
1272 formatJob jsn jsl (sn, (_, _, _, cmds)) =
1274 printf " echo job %d/%d" jsn sn:
1276 map (" gnt-instance " ++) cmds
1278 then ["", printf "echo jobset %d, %d jobs" jsn jsl] ++ out
1281 -- | Given a list of commands, prefix them with @gnt-instance@ and
1282 -- also beautify the display a little.
1283 formatCmds :: [JobSet] -> String
1286 concatMap (\(jsn, js) -> concatMap (formatJob jsn (length js))
1290 -- | Print the node list.
1291 printNodes :: Node.List -> [String] -> String
1293 let fields = case fs of
1294 [] -> Node.defaultFields
1295 "+":rest -> Node.defaultFields ++ rest
1297 snl = sortBy (comparing Node.idx) (Container.elems nl)
1298 (header, isnum) = unzip $ map Node.showHeader fields
1299 in unlines . map ((:) ' ' . intercalate " ") $
1300 formatTable (header:map (Node.list fields) snl) isnum
1302 -- | Print the instance list.
1303 printInsts :: Node.List -> Instance.List -> String
1305 let sil = sortBy (comparing Instance.idx) (Container.elems il)
1306 helper inst = [ if Instance.running inst then "R" else " "
1307 , Instance.name inst
1308 , Container.nameOf nl (Instance.pNode inst)
1309 , let sdx = Instance.sNode inst
1310 in if sdx == Node.noSecondary
1312 else Container.nameOf nl sdx
1313 , if Instance.autoBalance inst then "Y" else "N"
1314 , printf "%3d" $ Instance.vcpus inst
1315 , printf "%5d" $ Instance.mem inst
1316 , printf "%5d" $ Instance.dsk inst `div` 1024
1322 where DynUtil lC lM lD lN = Instance.util inst
1323 header = [ "F", "Name", "Pri_node", "Sec_node", "Auto_bal"
1324 , "vcpu", "mem" , "dsk", "lCpu", "lMem", "lDsk", "lNet" ]
1325 isnum = False:False:False:False:False:repeat True
1326 in unlines . map ((:) ' ' . intercalate " ") $
1327 formatTable (header:map helper sil) isnum
1329 -- | Shows statistics for a given node list.
1330 printStats :: Node.List -> String
1332 let dcvs = compDetailedCV $ Container.elems nl
1333 (weights, names) = unzip detailedCVInfo
1334 hd = zip3 (weights ++ repeat 1) (names ++ repeat "unknown") dcvs
1335 formatted = map (\(w, header, val) ->
1336 printf "%s=%.8f(x%.2f)" header val w::String) hd
1337 in intercalate ", " formatted
1339 -- | Convert a placement into a list of OpCodes (basically a job).
1340 iMoveToJob :: Node.List -- ^ The node list; only used for node
1341 -- names, so any version is good
1342 -- (before or after the operation)
1343 -> Instance.List -- ^ The instance list; also used for
1345 -> Idx -- ^ The index of the instance being
1347 -> IMove -- ^ The actual move to be described
1348 -> [OpCodes.OpCode] -- ^ The list of opcodes equivalent to
1350 iMoveToJob nl il idx move =
1351 let inst = Container.find idx il
1352 iname = Instance.name inst
1353 lookNode = Just . Container.nameOf nl
1354 opF = OpCodes.OpInstanceMigrate iname True False True Nothing
1355 opR n = OpCodes.OpInstanceReplaceDisks iname (lookNode n)
1356 OpCodes.ReplaceNewSecondary [] Nothing
1359 ReplacePrimary np -> [ opF, opR np, opF ]
1360 ReplaceSecondary ns -> [ opR ns ]
1361 ReplaceAndFailover np -> [ opR np, opF ]
1362 FailoverAndReplace ns -> [ opF, opR ns ]
1364 -- * Node group functions
1366 -- | Computes the group of an instance.
1367 instanceGroup :: Node.List -> Instance.Instance -> Result Gdx
1368 instanceGroup nl i =
1369 let sidx = Instance.sNode i
1370 pnode = Container.find (Instance.pNode i) nl
1371 snode = if sidx == Node.noSecondary
1373 else Container.find sidx nl
1374 pgroup = Node.group pnode
1375 sgroup = Node.group snode
1376 in if pgroup /= sgroup
1377 then fail ("Instance placed accross two node groups, primary " ++
1378 show pgroup ++ ", secondary " ++ show sgroup)
1381 -- | Computes the group of an instance per the primary node.
1382 instancePriGroup :: Node.List -> Instance.Instance -> Gdx
1383 instancePriGroup nl i =
1384 let pnode = Container.find (Instance.pNode i) nl
-- | Compute the list of badly allocated instances (split across node
-- groups).
findSplitInstances :: Node.List -> Instance.List -> [Instance.Instance]
findSplitInstances nl il =
  -- keep only the instances whose group computation fails, i.e. whose
  -- primary and secondary nodes live in different groups
  [inst | inst <- Container.elems il, not . isOk $ instanceGroup nl inst]
-- | Splits a cluster into the component node groups.
splitCluster :: Node.List -> Instance.List ->
                [(Gdx, (Node.List, Instance.List))]
splitCluster nl il =
  map perGroup $ Node.computeGroups (Container.elems nl)
  where -- builds the per-group (node list, instance list) pair; an
        -- instance belongs to the group of its primary node
        perGroup (guuid, nodes) =
          let nidxs = map Node.idx nodes
              sub_nl = Container.fromList (zip nidxs nodes)
              sub_il = Container.filter ((`elem` nidxs) . Instance.pNode) il
          in (guuid, (sub_nl, sub_il))
1404 -- | Compute the list of nodes that are to be evacuated, given a list
1405 -- of instances and an evacuation mode.
1406 nodesToEvacuate :: Instance.List -- ^ The cluster-wide instance list
1407 -> EvacMode -- ^ The evacuation mode we're using
1408 -> [Idx] -- ^ List of instance indices being evacuated
1409 -> IntSet.IntSet -- ^ Set of node indices
1410 nodesToEvacuate il mode =
1411 IntSet.delete Node.noSecondary .
1413 let i = Container.find idx il
1414 pdx = Instance.pNode i
1415 sdx = Instance.sNode i
1416 dt = Instance.diskTemplate i
1417 withSecondary = case dt of
1418 DTDrbd8 -> IntSet.insert sdx ns
1421 ChangePrimary -> IntSet.insert pdx ns
1422 ChangeSecondary -> withSecondary
1423 ChangeAll -> IntSet.insert pdx withSecondary