{-
-Copyright (C) 2009 Google Inc.
+Copyright (C) 2009, 2010 Google Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
AllocSolution
, Table(..)
, CStats(..)
+ , AllocStats
-- * Generic functions
, totalResources
+ , computeAllocationDelta
-- * First phase functions
, computeBadItems
-- * Second phase functions
- , printSolution
, printSolutionLine
, formatCmds
, involvedNodes
, printInsts
-- * Balacing functions
, checkMove
+ , doNextBalance
, tryBalance
, compCV
, printStats
-- * IAllocator functions
, tryAlloc
, tryReloc
+ , tryEvac
, collapseFailures
+ -- * Allocation functions
+ , iterateAlloc
+ , tieredAlloc
+ , instanceGroup
+ , findSplitInstances
+ , splitCluster
) where
import Data.List
+import Data.Ord (comparing)
import Text.Printf (printf)
-import Data.Function
import Control.Monad
import qualified Ganeti.HTools.Container as Container
-- * Types
-- | Allocation\/relocation solution.
-type AllocSolution = ([FailMode], Int, Maybe (Score, AllocElement))
-
--- | Allocation\/relocation element.
-type AllocElement = (Node.List, Instance.Instance, [Node.Node])
-
+type AllocSolution = ([FailMode], Int, [Node.AllocElement])
-- | The complete state for the balancing solution
data Table = Table Node.List Instance.List Score [Placement]
, csTmem :: Double -- ^ Cluster total mem
, csTdsk :: Double -- ^ Cluster total disk
, csTcpu :: Double -- ^ Cluster total cpus
+ , csVcpu :: Int -- ^ Cluster virtual cpus (if
+ -- node pCpu has been set,
+ -- otherwise -1)
, csXmem :: Int -- ^ Unnacounted for mem
, csNmem :: Int -- ^ Node own memory
, csScore :: Score -- ^ The cluster score
, csNinst :: Int -- ^ The total number of instances
}
+ deriving (Show)
+
+-- | A triple of resources: currently used, the delta that was
+-- allocated, and what was left unallocated (see 'computeAllocationDelta')
+type AllocStats = (RSpec, RSpec, RSpec)
-- * Utility functions
([Node.Node], [Instance.Instance])
computeBadItems nl il =
let bad_nodes = verifyN1 $ getOnline nl
- bad_instances = map (\idx -> Container.find idx il) .
+ bad_instances = map (`Container.find` il) .
sort . nub $
concatMap (\ n -> Node.sList n ++ Node.pList n) bad_nodes
in
-- | Zero-initializer for the CStats type
emptyCStats :: CStats
-emptyCStats = CStats 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+emptyCStats = CStats 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-- | Update stats with data from a new node
updateCStats :: CStats -> Node.Node -> CStats
csMmem = x_mmem, csMdsk = x_mdsk, csMcpu = x_mcpu,
csImem = x_imem, csIdsk = x_idsk, csIcpu = x_icpu,
csTmem = x_tmem, csTdsk = x_tdsk, csTcpu = x_tcpu,
+ csVcpu = x_vcpu,
csXmem = x_xmem, csNmem = x_nmem, csNinst = x_ninst
}
= cs
- Node.xMem node - Node.fMem node
inc_icpu = Node.uCpu node
inc_idsk = truncate (Node.tDsk node) - Node.fDsk node
+ inc_vcpu = Node.hiCpu node
in cs { csFmem = x_fmem + Node.fMem node
, csFdsk = x_fdsk + Node.fDsk node
, csTmem = x_tmem + Node.tMem node
, csTdsk = x_tdsk + Node.tDsk node
, csTcpu = x_tcpu + Node.tCpu node
+ , csVcpu = x_vcpu + inc_vcpu
, csXmem = x_xmem + Node.xMem node
, csNmem = x_nmem + Node.nMem node
, csNinst = x_ninst + length (Node.pList node)
let cs = foldl' updateCStats emptyCStats . Container.elems $ nl
in cs { csScore = compCV nl }
--- | The names of the individual elements in the CV list
-detailedCVNames :: [String]
-detailedCVNames = [ "free_mem_cv"
- , "free_disk_cv"
- , "n1_cnt"
- , "reserved_mem_cv"
- , "offline_all_cnt"
- , "offline_pri_cnt"
- , "vcpu_ratio_cv"
- , "cpu_load_cv"
- , "mem_load_cv"
- , "disk_load_cv"
- , "net_load_cv"
- , "pri_tags_score"
- ]
+-- | Compute the delta between two cluster states.
+--
+-- This is used when doing allocations, to better understand the
+-- available cluster resources. The return value is a triple of the
+-- currently used values, the delta that was still allocated, and
+-- what was left unallocated.
+computeAllocationDelta :: CStats -> CStats -> AllocStats
+computeAllocationDelta cini cfin =
+ let CStats {csImem = i_imem, csIdsk = i_idsk, csIcpu = i_icpu} = cini
+ CStats {csImem = f_imem, csIdsk = f_idsk, csIcpu = f_icpu,
+ csTmem = t_mem, csTdsk = t_dsk, csVcpu = v_cpu } = cfin
+ rini = RSpec i_icpu i_imem i_idsk
+ rfin = RSpec (f_icpu - i_icpu) (f_imem - i_imem) (f_idsk - i_idsk)
+ un_cpu = v_cpu - f_icpu
+ runa = RSpec un_cpu (truncate t_mem - f_imem) (truncate t_dsk - f_idsk)
+ in (rini, rfin, runa)
+
+-- | The names and weights of the individual elements in the CV list
+detailedCVInfo :: [(Double, String)]
+detailedCVInfo = [ (1, "free_mem_cv")
+ , (1, "free_disk_cv")
+ , (1, "n1_cnt")
+ , (1, "reserved_mem_cv")
+ , (4, "offline_all_cnt")
+ , (16, "offline_pri_cnt")
+ , (1, "vcpu_ratio_cv")
+ , (1, "cpu_load_cv")
+ , (1, "mem_load_cv")
+ , (1, "disk_load_cv")
+ , (1, "net_load_cv")
+ , (2, "pri_tags_score")
+ ]
+
+-- | Just the weights of 'detailedCVInfo', in the same order; used by
+-- 'compCV' to scale each detailed metric before summing.
+detailedCVWeights :: [Double]
+detailedCVWeights = map fst detailedCVInfo
-- | Compute the mem and disk covariance.
compDetailedCV :: Node.List -> [Double]
mem_cv = varianceCoeff mem_l
-- metric: disk covariance
dsk_cv = varianceCoeff dsk_l
- n1_l = length $ filter Node.failN1 nodes
- -- metric: count of failN1 nodes
- n1_score = fromIntegral n1_l::Double
+ -- metric: count of instances living on N1 failing nodes
+ n1_score = fromIntegral . sum . map (\n -> length (Node.sList n) +
+ length (Node.pList n)) .
+ filter Node.failN1 $ nodes :: Double
res_l = map Node.pRem nodes
-- metric: reserved memory covariance
res_cv = varianceCoeff res_l
-- | Compute the /total/ variance.
compCV :: Node.List -> Double
-compCV = sum . compDetailedCV
+compCV = sum . zipWith (*) detailedCVWeights . compDetailedCV
-- | Compute online nodes from a Node.List
getOnline :: Node.List -> [Node.Node]
old_s = Container.find old_sdx nl
int_p = Node.removePri old_p inst
int_s = Node.removeSec old_s inst
+ force_p = Node.offline old_p
new_nl = do -- Maybe monad
- new_p <- Node.addPri int_s inst
+ new_p <- Node.addPriEx force_p int_s inst
new_s <- Node.addSec int_p inst old_sdx
let new_inst = Instance.setBoth inst old_sdx old_pdx
return (Container.addTwo old_pdx new_s old_sdx new_p nl,
tgt_n = Container.find new_pdx nl
int_p = Node.removePri old_p inst
int_s = Node.removeSec old_s inst
+ force_p = Node.offline old_p
new_nl = do -- Maybe monad
-- check that the current secondary can host the instance
-- during the migration
- tmp_s <- Node.addPri int_s inst
+ tmp_s <- Node.addPriEx force_p int_s inst
let tmp_s' = Node.removePri tmp_s inst
- new_p <- Node.addPri tgt_n inst
- new_s <- Node.addSec tmp_s' inst new_pdx
+ new_p <- Node.addPriEx force_p tgt_n inst
+ new_s <- Node.addSecEx force_p tmp_s' inst new_pdx
let new_inst = Instance.setPri inst new_pdx
return (Container.add new_pdx new_p $
Container.addTwo old_pdx int_p old_sdx new_s nl,
old_s = Container.find old_sdx nl
tgt_n = Container.find new_sdx nl
int_s = Node.removeSec old_s inst
+ force_s = Node.offline old_s
new_inst = Instance.setSec inst new_sdx
- new_nl = Node.addSec tgt_n inst old_pdx >>=
+ new_nl = Node.addSecEx force_s tgt_n inst old_pdx >>=
\new_s -> return (Container.addTwo new_sdx
new_s old_sdx int_s nl,
new_inst, old_pdx, new_sdx)
tgt_n = Container.find new_pdx nl
int_p = Node.removePri old_p inst
int_s = Node.removeSec old_s inst
+ force_s = Node.offline old_s
new_nl = do -- Maybe monad
new_p <- Node.addPri tgt_n inst
- new_s <- Node.addSec int_p inst new_pdx
+ new_s <- Node.addSecEx force_s int_p inst new_pdx
let new_inst = Instance.setBoth inst new_pdx old_pdx
return (Container.add new_pdx new_p $
Container.addTwo old_pdx new_s old_sdx int_s nl,
tgt_n = Container.find new_sdx nl
int_p = Node.removePri old_p inst
int_s = Node.removeSec old_s inst
+ force_p = Node.offline old_p
new_nl = do -- Maybe monad
- new_p <- Node.addPri int_s inst
- new_s <- Node.addSec tgt_n inst old_sdx
+ new_p <- Node.addPriEx force_p int_s inst
+ new_s <- Node.addSecEx force_p tgt_n inst old_sdx
let new_inst = Instance.setBoth inst old_sdx new_sdx
return (Container.add new_sdx new_s $
Container.addTwo old_sdx new_p old_pdx int_p nl,
-- | Tries to allocate an instance on one given node.
allocateOnSingle :: Node.List -> Instance.Instance -> Node.Node
- -> OpResult AllocElement
+ -> OpResult Node.AllocElement
allocateOnSingle nl inst p =
let new_pdx = Node.idx p
new_inst = Instance.setBoth inst new_pdx Node.noSecondary
- new_nl = Node.addPri p inst >>= \new_p ->
- return (Container.add new_pdx new_p nl, new_inst, [new_p])
- in new_nl
+ in Node.addPri p inst >>= \new_p -> do
+ let new_nl = Container.add new_pdx new_p nl
+ new_score = compCV nl
+ return (new_nl, new_inst, [new_p], new_score)
-- | Tries to allocate an instance on a given pair of nodes.
allocateOnPair :: Node.List -> Instance.Instance -> Node.Node -> Node.Node
- -> OpResult AllocElement
+ -> OpResult Node.AllocElement
allocateOnPair nl inst tgt_p tgt_s =
let new_pdx = Node.idx tgt_p
new_sdx = Node.idx tgt_s
- new_nl = do -- Maybe monad
- new_p <- Node.addPri tgt_p inst
- new_s <- Node.addSec tgt_s inst new_pdx
- let new_inst = Instance.setBoth inst new_pdx new_sdx
- return (Container.addTwo new_pdx new_p new_sdx new_s nl, new_inst,
- [new_p, new_s])
- in new_nl
+ in do
+ new_p <- Node.addPri tgt_p inst
+ new_s <- Node.addSec tgt_s inst new_pdx
+ let new_inst = Instance.setBoth inst new_pdx new_sdx
+ new_nl = Container.addTwo new_pdx new_p new_sdx new_s nl
+ return (new_nl, new_inst, [new_p, new_s], compCV new_nl)
-- | Tries to perform an instance move and returns the best table
-- between the original one and the new one.
best_tbl =
foldl'
(\ step_tbl em ->
- if Instance.sNode em == Node.noSecondary then step_tbl
- else compareTables step_tbl $
- checkInstanceMove nodes_idx disk_moves ini_tbl em)
+ compareTables step_tbl $
+ checkInstanceMove nodes_idx disk_moves ini_tbl em)
ini_tbl victims
Table _ _ _ best_plc = best_tbl
- in
- if length best_plc == length ini_plc then -- no advancement
- ini_tbl
- else
- best_tbl
+ in if length best_plc == length ini_plc
+ then ini_tbl -- no advancement
+ else best_tbl
+
+-- | Check if we are allowed to go deeper in the balancing
+doNextBalance :: Table -- ^ The starting table
+ -> Int -- ^ Remaining length
+ -> Score -- ^ Score at which to stop
+ -> Bool -- ^ Whether another balancing round may be run
+doNextBalance ini_tbl max_rounds min_score =
+ let Table _ _ ini_cv ini_plc = ini_tbl
+ ini_plc_len = length ini_plc
+ in (max_rounds < 0 || ini_plc_len < max_rounds) && ini_cv > min_score
-- | Run a balance move
-
tryBalance :: Table -- ^ The starting table
- -> Int -- ^ Remaining length
-> Bool -- ^ Allow disk moves
- -> Score -- ^ Score at which to stop
+ -> Bool -- ^ Only evacuate moves
+ -> Score -- ^ Min gain threshold
+ -> Score -- ^ Min gain
-> Maybe Table -- ^ The resulting table and commands
-tryBalance ini_tbl max_rounds disk_moves min_score =
- let Table ini_nl ini_il ini_cv ini_plc = ini_tbl
- ini_plc_len = length ini_plc
- allowed_next = (max_rounds < 0 || ini_plc_len < max_rounds) &&
- ini_cv > min_score
+tryBalance ini_tbl disk_moves evac_mode mg_limit min_gain =
+ let Table ini_nl ini_il ini_cv _ = ini_tbl
+ all_inst = Container.elems ini_il
+ all_inst' = if evac_mode
+ then let bad_nodes = map Node.idx . filter Node.offline $
+ Container.elems ini_nl
+ in filter (\e -> Instance.sNode e `elem` bad_nodes ||
+ Instance.pNode e `elem` bad_nodes)
+ all_inst
+ else all_inst
+ reloc_inst = filter Instance.movable all_inst'
+ node_idx = map Node.idx . filter (not . Node.offline) $
+ Container.elems ini_nl
+ fin_tbl = checkMove node_idx disk_moves ini_tbl reloc_inst
+ (Table _ _ fin_cv _) = fin_tbl
in
- if allowed_next
- then let all_inst = Container.elems ini_il
- node_idx = map Node.idx . filter (not . Node.offline) $
- Container.elems ini_nl
- fin_tbl = checkMove node_idx disk_moves ini_tbl all_inst
- (Table _ _ fin_cv _) = fin_tbl
- in
- if fin_cv < ini_cv
- then Just fin_tbl -- this round made success, try deeper
- else Nothing
+ if fin_cv < ini_cv && (ini_cv > mg_limit || ini_cv - fin_cv >= min_gain)
+ then Just fin_tbl -- this round made success, return the new table
else Nothing
-- * Allocation functions
-- | Build failure stats out of a list of failures
collapseFailures :: [FailMode] -> FailStats
collapseFailures flst =
- map (\k -> (k, length $ filter ((==) k) flst)) [minBound..maxBound]
+ map (\k -> (k, length $ filter (k ==) flst)) [minBound..maxBound]
-- | Update current Allocation solution and failure stats with new
-- elements
-concatAllocs :: AllocSolution -> OpResult AllocElement -> AllocSolution
+concatAllocs :: AllocSolution -> OpResult Node.AllocElement -> AllocSolution
concatAllocs (flst, cntok, sols) (OpFail reason) = (reason:flst, cntok, sols)
-concatAllocs (flst, cntok, osols) (OpGood ns@(nl, _, _)) =
- let nscore = compCV nl
- -- Choose the old or new solution, based on the cluster score
+concatAllocs (flst, cntok, osols) (OpGood ns@(_, _, _, nscore)) =
+ let -- Choose the old or new solution, based on the cluster score
nsols = case osols of
- Nothing -> Just (nscore, ns)
- Just (oscore, _) ->
+ [] -> [ns]
+ (_, _, _, oscore):[] ->
if oscore < nscore
then osols
- else Just (nscore, ns)
+ else [ns]
+ -- FIXME: here we simply concat to lists with more
+ -- than one element; we should instead abort, since
+ -- this is not a valid usage of this function
+ xs -> ns:xs
nsuc = cntok + 1
-- Note: we force evaluation of nsols here in order to keep the
-- memory profile low - we know that we will need nsols for sure
ok_pairs = filter (\(x, y) -> Node.idx x /= Node.idx y) all_pairs
sols = foldl' (\cstate (p, s) ->
concatAllocs cstate $ allocateOnPair nl inst p s
- ) ([], 0, Nothing) ok_pairs
+ ) ([], 0, []) ok_pairs
in return sols
tryAlloc nl _ inst 1 =
let all_nodes = getOnline nl
sols = foldl' (\cstate ->
concatAllocs cstate . allocateOnSingle nl inst
- ) ([], 0, Nothing) all_nodes
+ ) ([], 0, []) all_nodes
in return sols
tryAlloc _ _ _ reqn = fail $ "Unsupported number of allocation \
let em = do
(mnl, i, _, _) <-
applyMove nl inst (ReplaceSecondary x)
- return (mnl, i, [Container.find x mnl])
+ return (mnl, i, [Container.find x mnl],
+ compCV mnl)
in concatAllocs cstate em
- ) ([], 0, Nothing) valid_idxes
+ ) ([], 0, []) valid_idxes
in return sols1
tryReloc _ _ _ reqn _ = fail $ "Unsupported number of relocation \
\destinations required (" ++ show reqn ++
"), only one supported"
+-- | Try to evacuate a list of nodes.
+tryEvac :: (Monad m) =>
+ Node.List -- ^ The node list
+ -> Instance.List -- ^ The instance list
+ -> [Ndx] -- ^ Nodes to be evacuated
+ -> m AllocSolution -- ^ Solution list
+tryEvac nl il ex_ndx =
+ let ex_nodes = map (`Container.find` nl) ex_ndx
+ -- NOTE(review): only instances having a *secondary* on the
+ -- evacuated nodes are collected (Node.sList); instances whose
+ -- primary is on them are not — confirm this is intended
+ all_insts = nub . concatMap Node.sList $ ex_nodes
+ in do
+ -- fold over the instances, threading the updated node list
+ -- (nl') through successive relocations so that each move sees
+ -- the effect of the previous ones
+ (_, sol) <- foldM (\(nl', (_, _, rsols)) idx -> do
+ -- FIXME: hardcoded one node here
+ (fm, cs, aes) <- tryReloc nl' il idx 1 ex_ndx
+ case aes of
+ -- keep the first (best) solution and its node list
+ csol@(nl'', _, _, _):_ ->
+ return (nl'', (fm, cs, csol:rsols))
+ _ -> fail $ "Can't evacuate instance " ++
+ Instance.name (Container.find idx il)
+ ) (nl, ([], 0, [])) all_insts
+ return sol
+
+-- | Recursively place instances on the cluster until we're out of space
+iterateAlloc :: Node.List -- ^ The starting node list
+ -> Instance.List -- ^ The starting instance list
+ -> Instance.Instance -- ^ The instance (spec) to allocate repeatedly
+ -> Int -- ^ Nodes required per allocation (forwarded to 'tryAlloc')
+ -> [Instance.Instance] -- ^ Accumulator of instances placed so far
+ -> Result (FailStats, Node.List, Instance.List,
+ [Instance.Instance])
+iterateAlloc nl il newinst nreq ixes =
+ let depth = length ixes
+ -- each new instance gets a synthetic name "new-<depth>" and a
+ -- fresh index one past the current instance list
+ newname = printf "new-%d" depth::String
+ newidx = length (Container.elems il) + depth
+ newi2 = Instance.setIdx (Instance.setName newinst newname) newidx
+ in case tryAlloc nl il newi2 nreq of
+ Bad s -> Bad s
+ Ok (errs, _, sols3) ->
+ case sols3 of
+ -- no solution left: cluster is full, return what we placed
+ [] -> Ok (collapseFailures errs, nl, il, ixes)
+ -- exactly one solution: record it and recurse (strictly,
+ -- to keep the accumulator from building thunks)
+ (xnl, xi, _, _):[] ->
+ iterateAlloc xnl (Container.add newidx xi il)
+ newinst nreq $! (xi:ixes)
+ _ -> Bad "Internal error: multiple solutions for single\
+ \ allocation"
+
+-- | Tiered allocation: allocate instances via 'iterateAlloc' until
+-- the cluster is full, then shrink the instance spec according to
+-- the most frequent failure mode and retry; stops once shrinking
+-- fails. (Assumes 'Instance.shrinkByType' reduces the resource
+-- named by the FailMode — confirm against Instance module.)
+tieredAlloc :: Node.List
+ -> Instance.List
+ -> Instance.Instance
+ -> Int
+ -> [Instance.Instance]
+ -> Result (FailStats, Node.List, Instance.List,
+ [Instance.Instance])
+tieredAlloc nl il newinst nreq ixes =
+ case iterateAlloc nl il newinst nreq ixes of
+ Bad s -> Bad s
+ Ok (errs, nl', il', ixes') ->
+ -- errs is sorted ascending by count, so 'last' picks the
+ -- most frequent failure mode to shrink along
+ case Instance.shrinkByType newinst . fst . last $
+ sortBy (comparing snd) errs of
+ Bad _ -> Ok (errs, nl', il', ixes')
+ Ok newinst' ->
+ tieredAlloc nl' il' newinst' nreq ixes'
+
-- * Formatting functions
-- | Given the original and final nodes, computes the relocation description.
pmlen = (2*nmlen + 1)
(i, p, s, mv, c) = plc
inst = Container.find i il
- inam = Instance.name inst
- npri = Container.nameOf nl p
- nsec = Container.nameOf nl s
- opri = Container.nameOf nl $ Instance.pNode inst
- osec = Container.nameOf nl $ Instance.sNode inst
+ inam = Instance.alias inst
+ npri = Node.alias $ Container.find p nl
+ nsec = Node.alias $ Container.find s nl
+ opri = Node.alias $ Container.find (Instance.pNode inst) nl
+ osec = Node.alias $ Container.find (Instance.sNode inst) nl
(moves, cmds) = computeMoves inst inam mv npri nsec
ostr = printf "%s:%s" opri osec::String
nstr = printf "%s:%s" npri nsec::String
(zip [1..] js)) .
zip [1..]
--- | Converts a solution to string format.
-printSolution :: Node.List
- -> Instance.List
- -> [Placement]
- -> ([String], [[String]])
-printSolution nl il sol =
- let
- nmlen = Container.maxNameLen nl
- imlen = Container.maxNameLen il
- in
- unzip $ zipWith (printSolutionLine nl il nmlen imlen) sol [1..]
-
-- | Print the node list.
printNodes :: Node.List -> [String] -> String
printNodes nl fs =
- let fields = if null fs
- then Node.defaultFields
- else fs
- snl = sortBy (compare `on` Node.idx) (Container.elems nl)
+ let fields = case fs of
+ [] -> Node.defaultFields
+ "+":rest -> Node.defaultFields ++ rest
+ _ -> fs
+ snl = sortBy (comparing Node.idx) (Container.elems nl)
(header, isnum) = unzip $ map Node.showHeader fields
in unlines . map ((:) ' ' . intercalate " ") $
formatTable (header:map (Node.list fields) snl) isnum
-- | Print the instance list.
printInsts :: Node.List -> Instance.List -> String
printInsts nl il =
- let sil = sortBy (compare `on` Instance.idx) (Container.elems il)
+ let sil = sortBy (comparing Instance.idx) (Container.elems il)
helper inst = [ if Instance.running inst then "R" else " "
, Instance.name inst
, Container.nameOf nl (Instance.pNode inst)
- , (let sdx = Instance.sNode inst
- in if sdx == Node.noSecondary
- then ""
- else Container.nameOf nl sdx)
+ , let sdx = Instance.sNode inst
+ in if sdx == Node.noSecondary
+ then ""
+ else Container.nameOf nl sdx
, printf "%3d" $ Instance.vcpus inst
, printf "%5d" $ Instance.mem inst
, printf "%5d" $ Instance.dsk inst `div` 1024
printStats :: Node.List -> String
printStats nl =
let dcvs = compDetailedCV nl
- hd = zip (detailedCVNames ++ repeat "unknown") dcvs
- formatted = map (\(header, val) ->
- printf "%s=%.8f" header val::String) hd
+ (weights, names) = unzip detailedCVInfo
+ hd = zip3 (weights ++ repeat 1) (names ++ repeat "unknown") dcvs
+ formatted = map (\(w, header, val) ->
+ printf "%s=%.8f(x%.2f)" header val w::String) hd
in intercalate ", " formatted
-- | Convert a placement into a list of OpCodes (basically a job).
-iMoveToJob :: String -> Node.List -> Instance.List
+iMoveToJob :: Node.List -> Instance.List
-> Idx -> IMove -> [OpCodes.OpCode]
-iMoveToJob csf nl il idx move =
+iMoveToJob nl il idx move =
let inst = Container.find idx il
- iname = Instance.name inst ++ csf
- lookNode n = Just (Container.nameOf nl n ++ csf)
+ iname = Instance.name inst
+ lookNode = Just . Container.nameOf nl
opF = if Instance.running inst
then OpCodes.OpMigrateInstance iname True False
else OpCodes.OpFailoverInstance iname False
ReplaceSecondary ns -> [ opR ns ]
ReplaceAndFailover np -> [ opR np, opF ]
FailoverAndReplace ns -> [ opF, opR ns ]
+
+-- | Computes the node group of an instance.
+--
+-- Fails when the instance's primary and secondary nodes live in
+-- different node groups (i.e. the instance is split).
+instanceGroup :: Node.List -> Instance.Instance -> Result GroupID
+instanceGroup nl i =
+ let sidx = Instance.sNode i
+ pnode = Container.find (Instance.pNode i) nl
+ -- an instance without a secondary trivially belongs to its
+ -- primary node's group
+ snode = if sidx == Node.noSecondary
+ then pnode
+ else Container.find sidx nl
+ puuid = Node.group pnode
+ suuid = Node.group snode
+ in if puuid /= suuid
+ then fail ("Instance placed across two node groups, primary " ++ puuid ++
+ ", secondary " ++ suuid)
+ else return puuid
+
+-- | Compute the list of badly allocated instances (split across node
+-- groups)
+findSplitInstances :: Node.List -> Instance.List -> [Instance.Instance]
+findSplitInstances nl il =
+ filter (not . isOk . instanceGroup nl) (Container.elems il)
+
+-- | Splits a cluster into the component node groups
+splitCluster :: Node.List -> Instance.List ->
+ [(GroupID, (Node.List, Instance.List))]
+splitCluster nl il =
+ let ngroups = Node.computeGroups (Container.elems nl)
+ in map (\(guuid, nodes) ->
+ let nidxs = map Node.idx nodes
+ nodes' = zip nidxs nodes
+ -- instances are assigned to a group via their *primary*
+ -- node only; split instances (secondary elsewhere) still
+ -- land in the primary's group
+ instances = Container.filter ((`elem` nidxs) . Instance.pNode) il
+ in (guuid, (Container.fromAssocList nodes', instances))) ngroups