4 # Copyright (C) 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
21 """Tool to merge two or more clusters together.
23 The clusters have to run the same version of Ganeti!
27 # pylint: disable-msg=C0103
28 # C0103: Invalid name cluster-merge
import logging
import optparse
import os
import shutil
import sys
import tempfile

from ganeti import cli
from ganeti import config
from ganeti import constants
from ganeti import errors
from ganeti import ssh
from ganeti import utils
# Strategies for handling node groups whose names exist in both clusters
_GROUPS_MERGE = "merge"
_GROUPS_RENAME = "rename"
# Prefix for the synthetic expected-cluster-id used while merging configs
_CLUSTERMERGE_ECID = "clustermerge-ecid"
# NOTE: dest="pause_period" is required; without it optparse would store the
# value as options.watcher_pause_period, but main() reads options.pause_period.
PAUSE_PERIOD_OPT = cli.cli_option("-p", "--watcher-pause-period", default=1800,
                                  action="store", type="int",
                                  dest="pause_period",
                                  help=("Amount of time in seconds watcher"
                                        " should be suspended from running"))
GROUPS_OPT = cli.cli_option("--groups", default=None, metavar="STRATEGY",
                            choices=(_GROUPS_MERGE, _GROUPS_RENAME),
                            dest="groups",
                            help=("How to handle groups that have the"
                                  " same name (One of: %s/%s)" %
                                  (_GROUPS_MERGE, _GROUPS_RENAME)))
def Flatten(unflattened_list):
  """Flattens a list.

  Nested lists are expanded recursively; non-list items are kept as-is.

  @param unflattened_list: A list of unflattened list objects.
  @return: A flattened list

  """
  flattened_list = []

  for item in unflattened_list:
    if isinstance(item, list):
      flattened_list.extend(Flatten(item))
    else:
      flattened_list.append(item)
  return flattened_list
class MergerData(object):
  """Container class to hold data used for merger.

  """
  def __init__(self, cluster, key_path, nodes, instances, config_path=None):
    """Initialize the container.

    @param cluster: The name of the cluster
    @param key_path: Path to the ssh private key used for authentication
    @param nodes: List of online nodes in the merging cluster
    @param instances: List of instances running on merging cluster
    @param config_path: Path to the merging cluster config

    """
    self.cluster = cluster
    self.key_path = key_path
    # Needed by _StopDaemons/_ReaddMergedNodesAndRedist; was not stored before
    self.nodes = nodes
    self.instances = instances
    self.config_path = config_path
class Merger(object):
  """Handling the merge.

  """
  def __init__(self, clusters, pause_period, groups):
    """Initialize object with sane defaults and infos required.

    @param clusters: The list of clusters to merge in
    @param pause_period: The time watcher shall be disabled for
    @param groups: How to handle group conflicts

    """
    self.merger_data = []
    self.clusters = clusters
    self.pause_period = pause_period
    self.work_dir = tempfile.mkdtemp(suffix="cluster-merger")
    (self.cluster_name, ) = cli.GetClient().QueryConfigValues(["cluster_name"])
    self.ssh_runner = ssh.SshRunner(self.cluster_name)
    # Group-conflict strategy; consumed later by _MergeNodeGroups
    self.groups = groups
121 """Sets up our end so we can do the merger.
123 This method is setting us up as a preparation for the merger.
124 It makes the initial contact and gathers information needed.
126 @raise errors.RemoteError: for errors in communication/grabbing
129 (remote_path, _, _) = ssh.GetUserFiles("root")
131 if self.cluster_name in self.clusters:
132 raise errors.CommandError("Cannot merge cluster %s with itself" %
135 # Fetch remotes private key
136 for cluster in self.clusters:
137 result = self._RunCmd(cluster, "cat %s" % remote_path, batch=False,
140 raise errors.RemoteError("There was an error while grabbing ssh private"
141 " key from %s. Fail reason: %s; output: %s" %
142 (cluster, result.fail_reason, result.output))
144 key_path = utils.PathJoin(self.work_dir, cluster)
145 utils.WriteFile(key_path, mode=0600, data=result.stdout)
147 result = self._RunCmd(cluster, "gnt-node list -o name,offline"
148 " --no-header --separator=,", private_key=key_path)
150 raise errors.RemoteError("Unable to retrieve list of nodes from %s."
151 " Fail reason: %s; output: %s" %
152 (cluster, result.fail_reason, result.output))
153 nodes_statuses = [line.split(',') for line in result.stdout.splitlines()]
154 nodes = [node_status[0] for node_status in nodes_statuses
155 if node_status[1] == "N"]
157 result = self._RunCmd(cluster, "gnt-instance list -o name --no-header",
158 private_key=key_path)
160 raise errors.RemoteError("Unable to retrieve list of instances from"
161 " %s. Fail reason: %s; output: %s" %
162 (cluster, result.fail_reason, result.output))
163 instances = result.stdout.splitlines()
165 self.merger_data.append(MergerData(cluster, key_path, nodes, instances))
167 def _PrepareAuthorizedKeys(self):
168 """Prepare the authorized_keys on every merging node.
170 This method add our public key to remotes authorized_key for further
174 (_, pub_key_file, auth_keys) = ssh.GetUserFiles("root")
175 pub_key = utils.ReadFile(pub_key_file)
177 for data in self.merger_data:
178 for node in data.nodes:
179 result = self._RunCmd(node, ("cat >> %s << '!EOF.'\n%s!EOF.\n" %
180 (auth_keys, pub_key)),
181 private_key=data.key_path)
184 raise errors.RemoteError("Unable to add our public key to %s in %s."
185 " Fail reason: %s; output: %s" %
186 (node, data.cluster, result.fail_reason,
189 def _RunCmd(self, hostname, command, user="root", use_cluster_key=False,
190 strict_host_check=False, private_key=None, batch=True,
192 """Wrapping SshRunner.Run with default parameters.
194 For explanation of parameters see L{ganeti.ssh.SshRunner.Run}.
197 return self.ssh_runner.Run(hostname=hostname, command=command, user=user,
198 use_cluster_key=use_cluster_key,
199 strict_host_check=strict_host_check,
200 private_key=private_key, batch=batch,
203 def _StopMergingInstances(self):
204 """Stop instances on merging clusters.
207 for cluster in self.clusters:
208 result = self._RunCmd(cluster, "gnt-instance shutdown --all"
212 raise errors.RemoteError("Unable to stop instances on %s."
213 " Fail reason: %s; output: %s" %
214 (cluster, result.fail_reason, result.output))
216 def _DisableWatcher(self):
217 """Disable watch on all merging clusters, including ourself.
220 for cluster in ["localhost"] + self.clusters:
221 result = self._RunCmd(cluster, "gnt-cluster watcher pause %d" %
225 raise errors.RemoteError("Unable to pause watcher on %s."
226 " Fail reason: %s; output: %s" %
227 (cluster, result.fail_reason, result.output))
229 def _StopDaemons(self):
230 """Stop all daemons on merging nodes.
233 cmd = "%s stop-all" % constants.DAEMON_UTIL
234 for data in self.merger_data:
235 for node in data.nodes:
236 result = self._RunCmd(node, cmd)
239 raise errors.RemoteError("Unable to stop daemons on %s."
240 " Fail reason: %s; output: %s." %
241 (node, result.fail_reason, result.output))
243 def _FetchRemoteConfig(self):
244 """Fetches and stores remote cluster config from the master.
246 This step is needed before we can merge the config.
249 for data in self.merger_data:
250 result = self._RunCmd(data.cluster, "cat %s" %
251 constants.CLUSTER_CONF_FILE)
254 raise errors.RemoteError("Unable to retrieve remote config on %s."
255 " Fail reason: %s; output %s" %
256 (data.cluster, result.fail_reason,
259 data.config_path = utils.PathJoin(self.work_dir, "%s_config.data" %
261 utils.WriteFile(data.config_path, data=result.stdout)
263 # R0201: Method could be a function
264 def _KillMasterDaemon(self): # pylint: disable-msg=R0201
265 """Kills the local master daemon.
267 @raise errors.CommandError: If unable to kill
270 result = utils.RunCmd([constants.DAEMON_UTIL, "stop-master"])
272 raise errors.CommandError("Unable to stop master daemons."
273 " Fail reason: %s; output: %s" %
274 (result.fail_reason, result.output))
276 def _MergeConfig(self):
277 """Merges all foreign config into our own config.
280 my_config = config.ConfigWriter(offline=True)
281 fake_ec_id = 0 # Needs to be uniq over the whole config merge
283 for data in self.merger_data:
284 other_config = config.ConfigWriter(data.config_path, accept_foreign=True)
285 self._MergeNodeGroups(my_config, other_config)
287 for node in other_config.GetNodeList():
288 node_info = other_config.GetNodeInfo(node)
289 my_config.AddNode(node_info, _CLUSTERMERGE_ECID + str(fake_ec_id))
292 for instance in other_config.GetInstanceList():
293 instance_info = other_config.GetInstanceInfo(instance)
295 # Update the DRBD port assignments
296 # This is a little bit hackish
297 for dsk in instance_info.disks:
298 if dsk.dev_type in constants.LDS_DRBD:
299 port = my_config.AllocatePort()
301 logical_id = list(dsk.logical_id)
303 dsk.logical_id = tuple(logical_id)
305 physical_id = list(dsk.physical_id)
306 physical_id[1] = physical_id[3] = port
307 dsk.physical_id = tuple(physical_id)
309 my_config.AddInstance(instance_info,
310 _CLUSTERMERGE_ECID + str(fake_ec_id))
313 # R0201: Method could be a function
314 def _MergeNodeGroups(self, my_config, other_config):
315 """Adds foreign node groups
317 ConfigWriter.AddNodeGroup takes care of making sure there are no conflicts.
319 # pylint: disable-msg=R0201
320 logging.info("Node group conflict strategy: %s", self.groups)
322 my_grps = my_config.GetAllNodeGroupsInfo().values()
323 other_grps = other_config.GetAllNodeGroupsInfo().values()
325 # Check for node group naming conflicts:
327 for other_grp in other_grps:
328 for my_grp in my_grps:
329 if other_grp.name == my_grp.name:
330 conflicts.append(other_grp)
333 conflict_names = utils.CommaJoin([g.name for g in conflicts])
334 logging.info("Node groups in both local and remote cluster: %s",
337 # User hasn't specified how to handle conflicts
339 raise errors.CommandError("The following node group(s) are in both"
340 " clusters, and no merge strategy has been"
341 " supplied (see the --groups option): %s" %
344 # User wants to rename conflicts
345 elif self.groups == _GROUPS_RENAME:
346 for grp in conflicts:
347 new_name = "%s-%s" % (grp.name, other_config.GetClusterName())
348 logging.info("Renaming remote node group from %s to %s"
349 " to resolve conflict", grp.name, new_name)
352 # User wants to merge conflicting groups
353 elif self.groups == 'merge':
354 for other_grp in conflicts:
355 logging.info("Merging local and remote '%s' groups", other_grp.name)
356 for node_name in other_grp.members[:]:
357 node = other_config.GetNodeInfo(node_name)
358 # Access to a protected member of a client class
359 # pylint: disable-msg=W0212
360 other_config._UnlockedRemoveNodeFromGroup(node)
362 # Access to a protected member of a client class
363 # pylint: disable-msg=W0212
364 my_grp_uuid = my_config._UnlockedLookupNodeGroup(other_grp.name)
366 # Access to a protected member of a client class
367 # pylint: disable-msg=W0212
368 my_config._UnlockedAddNodeToGroup(node, my_grp_uuid)
369 node.group = my_grp_uuid
370 # Remove from list of groups to add
371 other_grps.remove(other_grp)
373 for grp in other_grps:
374 #TODO: handle node group conflicts
375 my_config.AddNodeGroup(grp, _CLUSTERMERGE_ECID)
377 # R0201: Method could be a function
378 def _StartMasterDaemon(self, no_vote=False): # pylint: disable-msg=R0201
379 """Starts the local master daemon.
381 @param no_vote: Should the masterd started without voting? default: False
382 @raise errors.CommandError: If unable to start daemon.
387 env["EXTRA_MASTERD_ARGS"] = "--no-voting --yes-do-it"
389 result = utils.RunCmd([constants.DAEMON_UTIL, "start-master"], env=env)
391 raise errors.CommandError("Couldn't start ganeti master."
392 " Fail reason: %s; output: %s" %
393 (result.fail_reason, result.output))
395 def _ReaddMergedNodesAndRedist(self):
396 """Readds all merging nodes and make sure their config is up-to-date.
398 @raise errors.CommandError: If anything fails.
401 for data in self.merger_data:
402 for node in data.nodes:
403 result = utils.RunCmd(["gnt-node", "add", "--readd",
404 "--no-ssh-key-check", "--force-join", node])
406 raise errors.CommandError("Couldn't readd node %s. Fail reason: %s;"
407 " output: %s" % (node, result.fail_reason,
410 result = utils.RunCmd(["gnt-cluster", "redist-conf"])
412 raise errors.CommandError("Redistribution failed. Fail reason: %s;"
413 " output: %s" % (result.fail_reason,
416 # R0201: Method could be a function
417 def _StartupAllInstances(self): # pylint: disable-msg=R0201
418 """Starts up all instances (locally).
420 @raise errors.CommandError: If unable to start clusters
423 result = utils.RunCmd(["gnt-instance", "startup", "--all",
426 raise errors.CommandError("Unable to start all instances."
427 " Fail reason: %s; output: %s" %
428 (result.fail_reason, result.output))
430 # R0201: Method could be a function
431 def _VerifyCluster(self): # pylint: disable-msg=R0201
432 """Runs gnt-cluster verify to verify the health.
434 @raise errors.ProgrammError: If cluster fails on verification
437 result = utils.RunCmd(["gnt-cluster", "verify"])
439 raise errors.CommandError("Verification of cluster failed."
440 " Fail reason: %s; output: %s" %
441 (result.fail_reason, result.output))
444 """Does the actual merge.
446 It runs all the steps in the right order and updates the user about steps
447 taken. Also it keeps track of rollback_steps to undo everything.
452 logging.info("Pre cluster verification")
453 self._VerifyCluster()
455 logging.info("Prepare authorized_keys")
456 rbsteps.append("Remove our key from authorized_keys on nodes:"
458 self._PrepareAuthorizedKeys()
460 rbsteps.append("Start all instances again on the merging"
461 " clusters: %(clusters)s")
462 logging.info("Stopping merging instances (takes a while)")
463 self._StopMergingInstances()
465 logging.info("Disable watcher")
466 self._DisableWatcher()
467 logging.info("Stop daemons on merging nodes")
469 logging.info("Merging config")
470 self._FetchRemoteConfig()
472 logging.info("Stopping master daemon")
473 self._KillMasterDaemon()
475 rbsteps.append("Restore %s from another master candidate"
476 " and restart master daemon" %
477 constants.CLUSTER_CONF_FILE)
479 self._StartMasterDaemon(no_vote=True)
481 # Point of no return, delete rbsteps
484 logging.warning("We are at the point of no return. Merge can not easily"
485 " be undone after this point.")
486 logging.info("Readd nodes")
487 self._ReaddMergedNodesAndRedist()
489 logging.info("Merge done, restart master daemon normally")
490 self._KillMasterDaemon()
491 self._StartMasterDaemon()
493 logging.info("Starting instances again")
494 self._StartupAllInstances()
495 logging.info("Post cluster verification")
496 self._VerifyCluster()
497 except errors.GenericError, e:
501 nodes = Flatten([data.nodes for data in self.merger_data])
503 "clusters": self.clusters,
506 logging.critical("In order to rollback do the following:")
508 logging.critical(" * %s", step % info)
510 logging.critical("Nothing to rollback.")
512 # TODO: Keep track of steps done for a flawless resume?
515 """Clean up our environment.
517 This cleans up remote private keys and configs and after that
518 deletes the temporary directory.
521 shutil.rmtree(self.work_dir)
def SetupLogging(options):
  """Setting up logging infrastructure.

  @param options: Parsed command line options

  """
  formatter = logging.Formatter("%(asctime)s: %(levelname)s %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  # Verbosity is controlled entirely on the handler; root logger stays NOTSET
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.ERROR)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)
def main():
  """Main routine.

  @return: C{constants.EXIT_SUCCESS} or C{constants.EXIT_FAILURE}

  """
  program = os.path.basename(sys.argv[0])

  parser = optparse.OptionParser(usage=("%%prog [--debug|--verbose]"
                                        " [--watcher-pause-period SECONDS]"
                                        " [--groups [%s|%s]]"
                                        " <cluster> [<cluster...>]" %
                                        (_GROUPS_MERGE, _GROUPS_RENAME)),
                                 prog=program)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option(PAUSE_PERIOD_OPT)
  parser.add_option(GROUPS_OPT)

  (options, args) = parser.parse_args()

  SetupLogging(options)

  if not args:
    parser.error("No clusters specified")

  cluster_merger = Merger(utils.UniqueSequence(args), options.pause_period,
                          options.groups)
  try:
    try:
      cluster_merger.Setup()
      cluster_merger.Merge()
    except errors.GenericError as e:
      logging.exception(e)
      return constants.EXIT_FAILURE
  finally:
    # Always remove temporary keys/configs, even on failure
    cluster_merger.Cleanup()

  return constants.EXIT_SUCCESS
if __name__ == "__main__":
  # Propagate main()'s exit code to the shell
  sys.exit(main())