Revision ec0292f1
b/lib/cmdlib.py

@@ -500,6 +500,22 @@
     return _BuildInstanceHookEnv(**args)
 
 
+def _AdjustCandidatePool(lu):
+  """Adjust the candidate pool after node operations.
+
+  """
+  mod_list = lu.cfg.MaintainCandidatePool()
+  if mod_list:
+    lu.LogInfo("Promoted nodes to master candidate role: %s",
+               ", ".join(mod_list))
+    for name in mod_list:
+      lu.context.ReaddNode(name)
+  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
+  if mc_now > mc_max:
+    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
+               (mc_now, mc_max))
+
+
 def _CheckInstanceBridgesExist(lu, instance):
   """Check that the brigdes needed by an instance exist.
 
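The new helper funnels all post-operation pool maintenance through two config methods. Below is a minimal runnable sketch of that call flow; FakeConfig, FakeContext, and FakeLU are hypothetical stand-ins for ConfigWriter, the LU context, and a LogicalUnit (not real Ganeti classes), while the helper body is copied from the hunk above so the sketch runs standalone:

```python
class FakeConfig(object):
  def MaintainCandidatePool(self):
    # Pretend the pool had room and two nodes were promoted.
    return ["node2.example.com", "node3.example.com"]

  def GetMasterCandidateStats(self):
    return (3, 3)  # (current, desired-and-possible) counts

class FakeContext(object):
  def ReaddNode(self, name):
    print("readding %s to the cluster context" % name)

class FakeLU(object):
  cfg = FakeConfig()
  context = FakeContext()

  def LogInfo(self, msg, *args):
    print(msg % args if args else msg)

def _AdjustCandidatePool(lu):
  # Body copied from the hunk above.
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))

_AdjustCandidatePool(FakeLU())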
@@ -1358,26 +1374,7 @@
     # we want to update nodes after the cluster so that if any errors
     # happen, we have recorded and saved the cluster info
     if self.op.candidate_pool_size is not None:
-      node_info = self.cfg.GetAllNodesInfo().values()
-      num_candidates = len([node for node in node_info
-                            if node.master_candidate])
-      num_nodes = len(node_info)
-      if num_candidates < self.op.candidate_pool_size:
-        random.shuffle(node_info)
-        for node in node_info:
-          if num_candidates >= self.op.candidate_pool_size:
-            break
-          if node.master_candidate:
-            continue
-          node.master_candidate = True
-          self.LogInfo("Promoting node %s to master candidate", node.name)
-          self.cfg.Update(node)
-          self.context.ReaddNode(node)
-          num_candidates += 1
-      elif num_candidates > self.op.candidate_pool_size:
-        self.LogInfo("Note: more nodes are candidates (%d) than the new value"
-                     " of candidate_pool_size (%d)" %
-                     (num_candidates, self.op.candidate_pool_size))
+      _AdjustCandidatePool(self)
 
 
 def _WaitForSync(lu, instance, oneshot=False, unlock=False):
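With the inline loop gone, both directions of a candidate_pool_size change go through the shared code path: MaintainCandidatePool() grows the pool, while an over-full pool only produces a log note. A hedged sketch of the two cases, with made-up counts:

```python
def describe_pool_change(mc_now, mc_max):
  # mc_max is already capped at min(possible nodes, pool size),
  # mirroring what _UnlockedGetMasterCandidateStats() returns.
  if mc_now < mc_max:
    return "MaintainCandidatePool() promotes %d node(s)" % (mc_max - mc_now)
  elif mc_now > mc_max:
    return ("Note: more nodes are candidates (%d) than desired (%d)"
            % (mc_now, mc_max))
  return "pool already at the desired size"

print(describe_pool_change(3, 5))  # pool size raised -> promote 2
print(describe_pool_change(3, 2))  # pool size lowered -> only a note
```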
@@ -1623,22 +1620,7 @@
     self.rpc.call_node_leave_cluster(node.name)
 
     # Promote nodes to master candidate as needed
-    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
-    node_info = self.cfg.GetAllNodesInfo().values()
-    num_candidates = len([n for n in node_info
-                          if n.master_candidate])
-    num_nodes = len(node_info)
-    random.shuffle(node_info)
-    for node in node_info:
-      if num_candidates >= cp_size or num_candidates >= num_nodes:
-        break
-      if node.master_candidate:
-        continue
-      node.master_candidate = True
-      self.LogInfo("Promoting node %s to master candidate", node.name)
-      self.cfg.Update(node)
-      self.context.ReaddNode(node)
-      num_candidates += 1
+    _AdjustCandidatePool(self)
 
 
 class LUQueryNodes(NoHooksLU):
@@ -1973,9 +1955,8 @@
 
     cp_size = self.cfg.GetClusterInfo().candidate_pool_size
     node_info = self.cfg.GetAllNodesInfo().values()
-    num_candidates = len([n for n in node_info
-                          if n.master_candidate])
-    master_candidate = num_candidates < cp_size
+    mc_now, _ = self.cfg.GetMasterCandidateStats()
+    master_candidate = mc_now < cp_size
 
     self.new_node = objects.Node(name=node,
                                  primary_ip=primary_ip,
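The add-node path now derives the new node's candidacy from the same stats helper instead of re-counting candidates by hand. A tiny illustration with assumed sample values:

```python
# Assumed values for illustration; in the real code mc_now comes from
# self.cfg.GetMasterCandidateStats() and cp_size from the cluster config.
cp_size = 3              # candidate_pool_size
mc_now = 2               # candidates currently in the pool
master_candidate = mc_now < cp_size
print(master_candidate)  # True: the new node joins as a master candidate
```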
b/lib/config.py

@@ -275,14 +275,10 @@
     if not data.nodes[data.cluster.master_node].master_candidate:
       result.append("Master node is not a master candidate")
 
-    cp_size = data.cluster.candidate_pool_size
-    num_c = 0
-    for node in data.nodes.values():
-      if node.master_candidate:
-        num_c += 1
-    if cp_size > num_c and num_c < len(data.nodes):
-      result.append("Not enough master candidates: actual %d, desired %d,"
-                    " %d total nodes" % (num_c, cp_size, len(data.nodes)))
+    mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+    if mc_now < mc_max:
+      result.append("Not enough master candidates: actual %d, target %d" %
+                    (mc_now, mc_max))
 
     return result
 
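The stats-based check also changes behaviour for offline nodes, which no longer count as potential candidates. A worked example with assumed numbers (five nodes, two of them offline, candidate_pool_size = 4):

```python
# (master_candidate, offline) flags for five hypothetical nodes.
nodes = [
    (True, False), (True, False), (True, False),
    (False, True), (False, True),
]
candidate_pool_size = 4

mc_now = sum(1 for mc, off in nodes if not off and mc)
mc_max = min(sum(1 for _, off in nodes if not off), candidate_pool_size)
print(mc_now, mc_max)  # 3 3 -> no complaint; the old check compared the
                       # candidate count (3) against the pool size (4) and
                       # the total node count (5), so it would have flagged
                       # this perfectly healthy cluster
```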
@@ -772,7 +768,7 @@
     """Get the configuration of all nodes.
 
     @rtype: dict
-    @returns: dict of (node, node_info), where node_info is what
+    @return: dict of (node, node_info), where node_info is what
           would GetNodeInfo return for the node
 
     """
@@ -780,6 +776,67 @@
                     for node in self._UnlockedGetNodeList()])
     return my_dict
 
+  def _UnlockedGetMasterCandidateStats(self):
+    """Get the number of current and maximum desired and possible candidates.
+
+    @rtype: tuple
+    @return: tuple of (current, desired and possible)
+
+    """
+    mc_now = mc_max = 0
+    for node in self._config_data.nodes.itervalues():
+      if not node.offline:
+        mc_max += 1
+        if node.master_candidate:
+          mc_now += 1
+    mc_max = min(mc_max, self._config_data.cluster.candidate_pool_size)
+    return (mc_now, mc_max)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterCandidateStats(self):
+    """Get the number of current and maximum possible candidates.
+
+    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
+
+    @rtype: tuple
+    @return: tuple of (current, max)
+
+    """
+    return self._UnlockedGetMasterCandidateStats()
+
+  @locking.ssynchronized(_config_lock)
+  def MaintainCandidatePool(self):
+    """Try to grow the candidate pool to the desired size.
+
+    @rtype: list
+    @return: list with the adjusted node names
+
+    """
+    mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+    mod_list = []
+    if mc_now < mc_max:
+      node_list = self._config_data.nodes.keys()
+      random.shuffle(node_list)
+      for name in node_list:
+        if mc_now >= mc_max:
+          break
+        node = self._config_data.nodes[name]
+        if node.master_candidate or node.offline:
+          continue
+        mod_list.append(node.name)
+        node.master_candidate = True
+        node.serial_no += 1
+        mc_now += 1
+      if mc_now != mc_max:
+        # this should not happen
+        logging.warning("Warning: MaintainCandidatePool didn't manage to"
+                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
+      if mod_list:
+        self._config_data.cluster.serial_no += 1
+        self._WriteConfig()
+
+    return mod_list
+
   def _BumpSerialNo(self):
     """Bump up the serial number of the config.
 
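Finally, a standalone sketch of the promotion loop inside MaintainCandidatePool(), with a plain dict of (is_candidate, is_offline) flags standing in for the real objects.Node/ConfigWriter machinery (config writing and serial_no bumps omitted):

```python
import random

def maintain_pool(nodes, pool_size):
  # nodes: name -> (is_candidate, is_offline); hypothetical stand-in data.
  mc_now = sum(1 for mc, off in nodes.values() if mc and not off)
  mc_max = min(sum(1 for _, off in nodes.values() if not off), pool_size)
  mod_list = []
  if mc_now < mc_max:
    names = list(nodes)
    random.shuffle(names)        # promotion order is deliberately random
    for name in names:
      if mc_now >= mc_max:
        break
      mc, off = nodes[name]
      if mc or off:              # already a candidate, or offline
        continue
      nodes[name] = (True, off)
      mod_list.append(name)
      mc_now += 1
  return mod_list

nodes = {"node1": (True, False), "node2": (False, False),
         "node3": (False, False), "node4": (False, True)}
print(maintain_pool(nodes, 2))   # promotes one of node2/node3, never node4
```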