Revision 1c3231aa lib/bootstrap.py
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -609,7 +609,6 @@
     mac_prefix=mac_prefix,
     volume_group_name=vg_name,
     tcpudp_port_pool=set(),
-    master_node=hostname.name,
     master_ip=clustername.ip,
     master_netmask=master_netmask,
     master_netdev=master_netdev,
@@ -688,13 +687,14 @@
                                                  _INITCONF_ECID)
   master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                     _INITCONF_ECID)
+  cluster_config.master_node = master_node_config.uuid
   nodes = {
-    master_node_config.name: master_node_config,
+    master_node_config.uuid: master_node_config,
     }
   default_nodegroup = objects.NodeGroup(
     uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
     name=constants.INITIAL_NODE_GROUP_NAME,
-    members=[master_node_config.name],
+    members=[master_node_config.uuid],
     diskparams={},
     )
   nodegroups = {
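The hunk above switches the initial configuration over to UUID keys: the nodes dict and the default node group's members now use master_node_config.uuid, and cluster_config.master_node stores the master's UUID as well. A minimal sketch of what UUID-keyed node storage implies for name lookups; the Node class and helper below are simplified stand-ins for illustration, not the real Ganeti objects module:

import uuid

class Node(object):
  """Simplified stand-in for objects.Node; only name and uuid matter here."""
  def __init__(self, name):
    self.name = name
    self.uuid = str(uuid.uuid4())  # an identity that survives a rename

master = Node("node1.example.com")

# As in the new InitConfig: configuration maps are keyed by UUID.
nodes = {master.uuid: master}
master_node = master.uuid

# Looking a node up by name now means scanning values, not indexing keys.
def get_node_by_name(nodes, name):
  for node in nodes.values():
    if node.name == name:
      return node
  return None

assert get_node_by_name(nodes, "node1.example.com") is master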
@@ -714,7 +714,7 @@
                   mode=0600)
 
 
-def FinalizeClusterDestroy(master):
+def FinalizeClusterDestroy(master_uuid):
   """Execute the last steps of cluster destroy
 
   This function shuts down all the daemons, completing the destroy
@@ -725,22 +725,24 @@
   modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
   runner = rpc.BootstrapRunner()
 
+  master_name = cfg.GetNodeName(master_uuid)
+
   master_params = cfg.GetMasterNetworkParameters()
-  master_params.name = master
+  master_params.uuid = master_uuid
   ems = cfg.GetUseExternalMipScript()
-  result = runner.call_node_deactivate_master_ip(master_params.name,
-                                                 master_params, ems)
+  result = runner.call_node_deactivate_master_ip(master_name, master_params,
+                                                 ems)
 
   msg = result.fail_msg
   if msg:
     logging.warning("Could not disable the master IP: %s", msg)
 
-  result = runner.call_node_stop_master(master)
+  result = runner.call_node_stop_master(master_name)
   msg = result.fail_msg
   if msg:
     logging.warning("Could not disable the master role: %s", msg)
 
-  result = runner.call_node_leave_cluster(master, modify_ssh_setup)
+  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
   msg = result.fail_msg
   if msg:
     logging.warning("Could not shutdown the node daemon and cleanup"
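With FinalizeClusterDestroy now taking a UUID, the hostname that the RPC layer still needs is resolved exactly once, via cfg.GetNodeName(master_uuid), and reused for every call. A rough sketch of that resolve-once pattern; the ConfigStub class and rpc_call callable are illustrative assumptions, not the real Ganeti APIs:

class ConfigStub(object):
  """Illustrative stand-in for the parts of ConfigWriter used here."""
  def __init__(self, nodes):
    self._nodes = nodes  # dict: node uuid -> node object

  def GetNodeName(self, node_uuid):
    return self._nodes[node_uuid].name

def finalize_destroy(cfg, master_uuid, rpc_call):
  # Resolve the UUID to a hostname once; every RPC below addresses by name.
  master_name = cfg.GetNodeName(master_uuid)
  for call in ("deactivate_master_ip", "stop_master", "leave_cluster"):
    rpc_call(call, master_name)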
@@ -788,7 +790,7 @@
   sstore = ssconf.SimpleStore()
 
   old_master, new_master = ssconf.GetMasterAndMyself(sstore)
-  node_list = sstore.GetNodeList()
+  node_names = sstore.GetNodeList()
   mc_list = sstore.GetMasterCandidates()
 
   if old_master == new_master:
@@ -807,7 +809,7 @@
                                errors.ECODE_STATE)
 
   if not no_voting:
-    vote_list = GatherMasterVotes(node_list)
+    vote_list = GatherMasterVotes(node_names)
 
     if vote_list:
       voted_master = vote_list[0][0]
@@ -832,8 +834,20 @@
   # configuration data
   cfg = config.ConfigWriter(accept_foreign=True)
 
+  old_master_node = cfg.GetNodeInfoByName(old_master)
+  if old_master_node is None:
+    raise errors.OpPrereqError("Could not find old master node '%s' in"
+                               " cluster configuration." % old_master,
+                               errors.ECODE_NOENT)
+
   cluster_info = cfg.GetClusterInfo()
-  cluster_info.master_node = new_master
+  new_master_node = cfg.GetNodeInfoByName(new_master)
+  if new_master_node is None:
+    raise errors.OpPrereqError("Could not find new master node '%s' in"
+                               " cluster configuration." % new_master,
+                               errors.ECODE_NOENT)
+
+  cluster_info.master_node = new_master_node.uuid
   # this will also regenerate the ssconf files, since we updated the
   # cluster info
   cfg.Update(cluster_info, logging.error)
@@ -851,9 +865,9 @@
 
   runner = rpc.BootstrapRunner()
   master_params = cfg.GetMasterNetworkParameters()
-  master_params.name = old_master
+  master_params.uuid = old_master_node.uuid
   ems = cfg.GetUseExternalMipScript()
-  result = runner.call_node_deactivate_master_ip(master_params.name,
+  result = runner.call_node_deactivate_master_ip(old_master,
                                                  master_params, ems)
 
   msg = result.fail_msg
@@ -917,7 +931,7 @@
   return old_master
 
 
-def GatherMasterVotes(node_list):
+def GatherMasterVotes(node_names):
   """Check the agreement on who is the master.
 
   This function will return a list of (node, number of votes), ordered
@@ -931,8 +945,8 @@
   since we use the same source for configuration information for both
   backend and boostrap, we'll always vote for ourselves.
 
-  @type node_list: list
-  @param node_list: the list of nodes to query for master info; the current
+  @type node_names: list
+  @param node_names: the list of nodes to query for master info; the current
     node will be removed if it is in the list
   @rtype: list
   @return: list of (node, votes)
@@ -940,30 +954,31 @@
   """
   myself = netutils.Hostname.GetSysName()
   try:
-    node_list.remove(myself)
+    node_names.remove(myself)
   except ValueError:
     pass
-  if not node_list:
+  if not node_names:
     # no nodes left (eventually after removing myself)
     return []
-  results = rpc.BootstrapRunner().call_master_info(node_list)
+  results = rpc.BootstrapRunner().call_master_info(node_names)
   if not isinstance(results, dict):
     # this should not happen (unless internal error in rpc)
     logging.critical("Can't complete rpc call, aborting master startup")
-    return [(None, len(node_list))]
+    return [(None, len(node_names))]
   votes = {}
-  for node in results:
-    nres = results[node]
+  for node_name in results:
+    nres = results[node_name]
     data = nres.payload
     msg = nres.fail_msg
     fail = False
     if msg:
-      logging.warning("Error contacting node %s: %s", node, msg)
+      logging.warning("Error contacting node %s: %s", node_name, msg)
       fail = True
     # for now we accept both length 3, 4 and 5 (data[3] is primary ip version
     # and data[4] is the master netmask)
     elif not isinstance(data, (tuple, list)) or len(data) < 3:
-      logging.warning("Invalid data received from node %s: %s", node, data)
+      logging.warning("Invalid data received from node %s: %s",
+                      node_name, data)
       fail = True
     if fail:
       if None not in votes:
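The last hunk ends just as the per-node bookkeeping begins. For context, the docstring above promises a list of (node, votes) ordered by descending vote count; a tally along these lines (a simplified reconstruction for illustration, not the verbatim continuation of GatherMasterVotes) would meet that contract:

def tally_votes(reported_masters):
  """Count each reported master; None stands for an unreachable node."""
  votes = {}
  for master in reported_masters:  # one entry per queried node
    votes[master] = votes.get(master, 0) + 1
  # Sort by number of votes, descending, as the docstring specifies.
  return sorted(votes.items(), key=lambda item: item[1], reverse=True)

print(tally_votes(["nodeA", "nodeA", "nodeB"]))
# [('nodeA', 2), ('nodeB', 1)]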