4 # Copyright (C) 2010, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 """Tool to merge two or more clusters together.
23 The clusters have to run the same version of Ganeti!
27 # pylint: disable=C0103
28 # C0103: Invalid name cluster-merge
import logging
import optparse
import os
import shutil
import sys
import tempfile

from ganeti import cli
from ganeti import config
from ganeti import constants
from ganeti import errors
from ganeti import ssh
from ganeti import utils
from ganeti import pathutils
46 _GROUPS_MERGE = "merge"
47 _GROUPS_RENAME = "rename"
48 _CLUSTERMERGE_ECID = "clustermerge-ecid"
51 _RESTART_NONE = "none"
52 _RESTART_CHOICES = (_RESTART_ALL, _RESTART_UP, _RESTART_NONE)
53 _PARAMS_STRICT = "strict"
55 _PARAMS_CHOICES = (_PARAMS_STRICT, _PARAMS_WARN)
# Command-line options for the merge tool.  dest= names must match the
# attributes read from "options" in main().
PAUSE_PERIOD_OPT = cli.cli_option("-p", "--watcher-pause-period", default=1800,
                                  action="store", type="int",
                                  dest="pause_period",
                                  help=("Amount of time in seconds watcher"
                                        " should be suspended from running"))
GROUPS_OPT = cli.cli_option("--groups", default=None, metavar="STRATEGY",
                            choices=(_GROUPS_MERGE, _GROUPS_RENAME),
                            dest="groups",
                            help=("How to handle groups that have the"
                                  " same name (One of: %s/%s)" %
                                  (_GROUPS_MERGE, _GROUPS_RENAME)))
PARAMS_OPT = cli.cli_option("--parameter-conflicts", default=_PARAMS_STRICT,
                            metavar="STRATEGY",
                            choices=_PARAMS_CHOICES,
                            dest="params",
                            help=("How to handle params that have"
                                  " different values (One of: %s/%s)" %
                                  _PARAMS_CHOICES))
RESTART_OPT = cli.cli_option("--restart", default=_RESTART_ALL,
                             metavar="STRATEGY",
                             choices=_RESTART_CHOICES,
                             dest="restart",
                             help=("How to handle restarting instances"
                                   " same name (One of: %s/%s/%s)" %
                                   _RESTART_CHOICES))
SKIP_STOP_INSTANCES_OPT = \
  cli.cli_option("--skip-stop-instances", default=True, action="store_false",
                 dest="stop_instances",
                 help=("Don't stop the instances on the clusters, just check "
                       "that none is running"))
def Flatten(unflattened_list):
  """Flattens a list.

  Nested lists are flattened recursively; non-list items are kept as-is.

  @param unflattened_list: A list of unflattened list objects.
  @return: A flattened list

  """
  flattened_list = []

  for item in unflattened_list:
    if isinstance(item, list):
      # Recurse into nested lists so arbitrary nesting depth is supported
      flattened_list.extend(Flatten(item))
    else:
      flattened_list.append(item)
  return flattened_list
class MergerData(object):
  """Container class to hold data used for merger.

  """
  def __init__(self, cluster, key_path, nodes, instances, master_node,
               config_path=None):
    """Initialize the container.

    @param cluster: The name of the cluster
    @param key_path: Path to the ssh private key used for authentication
    @param nodes: List of online nodes in the merging cluster
    @param instances: List of instances running on merging cluster
    @param master_node: Name of the master node
    @param config_path: Path to the merging cluster config

    """
    self.cluster = cluster
    self.key_path = key_path
    self.nodes = nodes
    self.instances = instances
    self.master_node = master_node
    # config_path is filled in later by Merger._FetchRemoteConfig()
    self.config_path = config_path
class Merger(object):
  """Handling the merge.

  """
  # Instance states considered "running" when checking the mergee clusters
  RUNNING_STATUSES = frozenset([
    constants.INSTST_RUNNING,
    constants.INSTST_ERRORUP,
    ])

  def __init__(self, clusters, pause_period, groups, restart, params,
               stop_instances):
    """Initialize object with sane defaults and infos required.

    @param clusters: The list of clusters to merge in
    @param pause_period: The time watcher shall be disabled for
    @param groups: How to handle group conflicts
    @param restart: How to handle instance restart
    @param params: How to handle cluster parameter conflicts
    @param stop_instances: Indicates whether the instances must be stopped
                           (True) or if the Merger must only check if no
                           instances are running on the mergee clusters (False)

    """
    self.merger_data = []
    self.clusters = clusters
    self.pause_period = pause_period
    # Scratch directory for remote keys and fetched configs; removed in
    # Cleanup()
    self.work_dir = tempfile.mkdtemp(suffix="cluster-merger")
    (self.cluster_name, ) = cli.GetClient().QueryConfigValues(["cluster_name"])
    self.ssh_runner = ssh.SshRunner(self.cluster_name)
    self.groups = groups
    self.restart = restart
    self.params = params
    self.stop_instances = stop_instances
    # Restarting only previously-up instances is not implemented yet
    if self.restart == _RESTART_UP:
      raise NotImplementedError
169 """Sets up our end so we can do the merger.
171 This method is setting us up as a preparation for the merger.
172 It makes the initial contact and gathers information needed.
174 @raise errors.RemoteError: for errors in communication/grabbing
177 (remote_path, _, _) = ssh.GetUserFiles("root")
179 if self.cluster_name in self.clusters:
180 raise errors.CommandError("Cannot merge cluster %s with itself" %
183 # Fetch remotes private key
184 for cluster in self.clusters:
185 result = self._RunCmd(cluster, "cat %s" % remote_path, batch=False,
188 raise errors.RemoteError("There was an error while grabbing ssh private"
189 " key from %s. Fail reason: %s; output: %s" %
190 (cluster, result.fail_reason, result.output))
192 key_path = utils.PathJoin(self.work_dir, cluster)
193 utils.WriteFile(key_path, mode=0600, data=result.stdout)
195 result = self._RunCmd(cluster, "gnt-node list -o name,offline"
196 " --no-headers --separator=,", private_key=key_path)
198 raise errors.RemoteError("Unable to retrieve list of nodes from %s."
199 " Fail reason: %s; output: %s" %
200 (cluster, result.fail_reason, result.output))
201 nodes_statuses = [line.split(",") for line in result.stdout.splitlines()]
202 nodes = [node_status[0] for node_status in nodes_statuses
203 if node_status[1] == "N"]
205 result = self._RunCmd(cluster, "gnt-instance list -o name --no-headers",
206 private_key=key_path)
208 raise errors.RemoteError("Unable to retrieve list of instances from"
209 " %s. Fail reason: %s; output: %s" %
210 (cluster, result.fail_reason, result.output))
211 instances = result.stdout.splitlines()
213 path = utils.PathJoin(pathutils.DATA_DIR, "ssconf_%s" %
214 constants.SS_MASTER_NODE)
215 result = self._RunCmd(cluster, "cat %s" % path, private_key=key_path)
217 raise errors.RemoteError("Unable to retrieve the master node name from"
218 " %s. Fail reason: %s; output: %s" %
219 (cluster, result.fail_reason, result.output))
220 master_node = result.stdout.strip()
222 self.merger_data.append(MergerData(cluster, key_path, nodes, instances,
225 def _PrepareAuthorizedKeys(self):
226 """Prepare the authorized_keys on every merging node.
228 This method add our public key to remotes authorized_key for further
232 (_, pub_key_file, auth_keys) = ssh.GetUserFiles("root")
233 pub_key = utils.ReadFile(pub_key_file)
235 for data in self.merger_data:
236 for node in data.nodes:
237 result = self._RunCmd(node, ("cat >> %s << '!EOF.'\n%s!EOF.\n" %
238 (auth_keys, pub_key)),
239 private_key=data.key_path, max_attempts=3)
242 raise errors.RemoteError("Unable to add our public key to %s in %s."
243 " Fail reason: %s; output: %s" %
244 (node, data.cluster, result.fail_reason,
247 def _RunCmd(self, hostname, command, user="root", use_cluster_key=False,
248 strict_host_check=False, private_key=None, batch=True,
249 ask_key=False, max_attempts=1):
250 """Wrapping SshRunner.Run with default parameters.
252 For explanation of parameters see L{ganeti.ssh.SshRunner.Run}.
255 for _ in range(max_attempts):
256 result = self.ssh_runner.Run(hostname=hostname, command=command,
257 user=user, use_cluster_key=use_cluster_key,
258 strict_host_check=strict_host_check,
259 private_key=private_key, batch=batch,
261 if not result.failed:
266 def _CheckRunningInstances(self):
267 """Checks if on the clusters to be merged there are running instances
270 @return: True if there are running instances, False otherwise
273 for cluster in self.clusters:
274 result = self._RunCmd(cluster, "gnt-instance list -o status")
275 if self.RUNNING_STATUSES.intersection(result.output.splitlines()):
280 def _StopMergingInstances(self):
281 """Stop instances on merging clusters.
284 for cluster in self.clusters:
285 result = self._RunCmd(cluster, "gnt-instance shutdown --all"
289 raise errors.RemoteError("Unable to stop instances on %s."
290 " Fail reason: %s; output: %s" %
291 (cluster, result.fail_reason, result.output))
293 def _DisableWatcher(self):
294 """Disable watch on all merging clusters, including ourself.
297 for cluster in ["localhost"] + self.clusters:
298 result = self._RunCmd(cluster, "gnt-cluster watcher pause %d" %
302 raise errors.RemoteError("Unable to pause watcher on %s."
303 " Fail reason: %s; output: %s" %
304 (cluster, result.fail_reason, result.output))
306 def _RemoveMasterIps(self):
307 """Removes the master IPs from the master nodes of each cluster.
310 for data in self.merger_data:
311 result = self._RunCmd(data.master_node,
312 "gnt-cluster deactivate-master-ip --yes")
315 raise errors.RemoteError("Unable to remove master IP on %s."
316 " Fail reason: %s; output: %s" %
321 def _StopDaemons(self):
322 """Stop all daemons on merging nodes.
325 cmd = "%s stop-all" % pathutils.DAEMON_UTIL
326 for data in self.merger_data:
327 for node in data.nodes:
328 result = self._RunCmd(node, cmd, max_attempts=3)
331 raise errors.RemoteError("Unable to stop daemons on %s."
332 " Fail reason: %s; output: %s." %
333 (node, result.fail_reason, result.output))
335 def _FetchRemoteConfig(self):
336 """Fetches and stores remote cluster config from the master.
338 This step is needed before we can merge the config.
341 for data in self.merger_data:
342 result = self._RunCmd(data.cluster, "cat %s" %
343 pathutils.CLUSTER_CONF_FILE)
346 raise errors.RemoteError("Unable to retrieve remote config on %s."
347 " Fail reason: %s; output %s" %
348 (data.cluster, result.fail_reason,
351 data.config_path = utils.PathJoin(self.work_dir, "%s_config.data" %
353 utils.WriteFile(data.config_path, data=result.stdout)
355 # R0201: Method could be a function
356 def _KillMasterDaemon(self): # pylint: disable=R0201
357 """Kills the local master daemon.
359 @raise errors.CommandError: If unable to kill
362 result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
364 raise errors.CommandError("Unable to stop master daemons."
365 " Fail reason: %s; output: %s" %
366 (result.fail_reason, result.output))
368 def _MergeConfig(self):
369 """Merges all foreign config into our own config.
372 my_config = config.ConfigWriter(offline=True)
373 fake_ec_id = 0 # Needs to be uniq over the whole config merge
375 for data in self.merger_data:
376 other_config = config.ConfigWriter(data.config_path, accept_foreign=True)
377 self._MergeClusterConfigs(my_config, other_config)
378 self._MergeNodeGroups(my_config, other_config)
380 for node in other_config.GetNodeList():
381 node_info = other_config.GetNodeInfo(node)
382 # Offline the node, it will be reonlined later at node readd
383 node_info.master_candidate = False
384 node_info.drained = False
385 node_info.offline = True
386 my_config.AddNode(node_info, _CLUSTERMERGE_ECID + str(fake_ec_id))
389 for instance in other_config.GetInstanceList():
390 instance_info = other_config.GetInstanceInfo(instance)
392 # Update the DRBD port assignments
393 # This is a little bit hackish
394 for dsk in instance_info.disks:
395 if dsk.dev_type in constants.LDS_DRBD:
396 port = my_config.AllocatePort()
398 logical_id = list(dsk.logical_id)
400 dsk.logical_id = tuple(logical_id)
402 physical_id = list(dsk.physical_id)
403 physical_id[1] = physical_id[3] = port
404 dsk.physical_id = tuple(physical_id)
406 my_config.AddInstance(instance_info,
407 _CLUSTERMERGE_ECID + str(fake_ec_id))
410 def _MergeClusterConfigs(self, my_config, other_config):
411 """Checks that all relevant cluster parameters are compatible
414 my_cluster = my_config.GetClusterInfo()
415 other_cluster = other_config.GetClusterInfo()
423 "default_iallocator",
424 "drbd_usermode_helper",
426 "maintain_node_health",
434 check_params_strict = [
437 if constants.ENABLE_FILE_STORAGE:
438 check_params_strict.append("file_storage_dir")
439 if constants.ENABLE_SHARED_FILE_STORAGE:
440 check_params_strict.append("shared_file_storage_dir")
441 check_params.extend(check_params_strict)
443 if self.params == _PARAMS_STRICT:
446 params_strict = False
448 for param_name in check_params:
449 my_param = getattr(my_cluster, param_name)
450 other_param = getattr(other_cluster, param_name)
451 if my_param != other_param:
452 logging.error("The value (%s) of the cluster parameter %s on %s"
453 " differs to this cluster's value (%s)",
454 other_param, param_name, other_cluster.cluster_name,
456 if params_strict or param_name in check_params_strict:
463 # Check default hypervisor
464 my_defhyp = my_cluster.enabled_hypervisors[0]
465 other_defhyp = other_cluster.enabled_hypervisors[0]
466 if my_defhyp != other_defhyp:
467 logging.warning("The default hypervisor (%s) differs on %s, new"
468 " instances will be created with this cluster's"
469 " default hypervisor (%s)", other_defhyp,
470 other_cluster.cluster_name, my_defhyp)
472 if (set(my_cluster.enabled_hypervisors) !=
473 set(other_cluster.enabled_hypervisors)):
474 logging.error("The set of enabled hypervisors (%s) on %s differs to"
475 " this cluster's set (%s)",
476 other_cluster.enabled_hypervisors,
477 other_cluster.cluster_name, my_cluster.enabled_hypervisors)
480 # Check hypervisor params for hypervisors we care about
481 for hyp in my_cluster.enabled_hypervisors:
482 for param in my_cluster.hvparams[hyp]:
483 my_value = my_cluster.hvparams[hyp][param]
484 other_value = other_cluster.hvparams[hyp][param]
485 if my_value != other_value:
486 logging.error("The value (%s) of the %s parameter of the %s"
487 " hypervisor on %s differs to this cluster's parameter"
489 other_value, param, hyp, other_cluster.cluster_name,
494 # Check os hypervisor params for hypervisors we care about
495 for os_name in set(my_cluster.os_hvp.keys() + other_cluster.os_hvp.keys()):
496 for hyp in my_cluster.enabled_hypervisors:
497 my_os_hvp = self._GetOsHypervisor(my_cluster, os_name, hyp)
498 other_os_hvp = self._GetOsHypervisor(other_cluster, os_name, hyp)
499 if my_os_hvp != other_os_hvp:
500 logging.error("The OS parameters (%s) for the %s OS for the %s"
501 " hypervisor on %s differs to this cluster's parameters"
503 other_os_hvp, os_name, hyp, other_cluster.cluster_name,
511 if my_cluster.modify_etc_hosts != other_cluster.modify_etc_hosts:
512 logging.warning("The modify_etc_hosts value (%s) differs on %s,"
513 " this cluster's value (%s) will take precedence",
514 other_cluster.modify_etc_hosts,
515 other_cluster.cluster_name,
516 my_cluster.modify_etc_hosts)
518 if my_cluster.modify_ssh_setup != other_cluster.modify_ssh_setup:
519 logging.warning("The modify_ssh_setup value (%s) differs on %s,"
520 " this cluster's value (%s) will take precedence",
521 other_cluster.modify_ssh_setup,
522 other_cluster.cluster_name,
523 my_cluster.modify_ssh_setup)
528 my_cluster.reserved_lvs = list(set(my_cluster.reserved_lvs +
529 other_cluster.reserved_lvs))
531 if my_cluster.prealloc_wipe_disks != other_cluster.prealloc_wipe_disks:
532 logging.warning("The prealloc_wipe_disks value (%s) on %s differs to this"
533 " cluster's value (%s). The least permissive value (%s)"
534 " will be used", other_cluster.prealloc_wipe_disks,
535 other_cluster.cluster_name,
536 my_cluster.prealloc_wipe_disks, True)
537 my_cluster.prealloc_wipe_disks = True
539 for os_, osparams in other_cluster.osparams.items():
540 if os_ not in my_cluster.osparams:
541 my_cluster.osparams[os_] = osparams
542 elif my_cluster.osparams[os_] != osparams:
543 logging.error("The OS parameters (%s) for the %s OS on %s differs to"
544 " this cluster's parameters (%s)",
545 osparams, os_, other_cluster.cluster_name,
546 my_cluster.osparams[os_])
551 raise errors.ConfigurationError("Cluster config for %s has incompatible"
552 " values, please fix and re-run" %
553 other_cluster.cluster_name)
555 # R0201: Method could be a function
556 def _GetOsHypervisor(self, cluster, os_name, hyp): # pylint: disable=R0201
557 if os_name in cluster.os_hvp:
558 return cluster.os_hvp[os_name].get(hyp, None)
562 # R0201: Method could be a function
563 def _MergeNodeGroups(self, my_config, other_config):
564 """Adds foreign node groups
566 ConfigWriter.AddNodeGroup takes care of making sure there are no conflicts.
568 # pylint: disable=R0201
569 logging.info("Node group conflict strategy: %s", self.groups)
571 my_grps = my_config.GetAllNodeGroupsInfo().values()
572 other_grps = other_config.GetAllNodeGroupsInfo().values()
574 # Check for node group naming conflicts:
576 for other_grp in other_grps:
577 for my_grp in my_grps:
578 if other_grp.name == my_grp.name:
579 conflicts.append(other_grp)
582 conflict_names = utils.CommaJoin([g.name for g in conflicts])
583 logging.info("Node groups in both local and remote cluster: %s",
586 # User hasn't specified how to handle conflicts
588 raise errors.CommandError("The following node group(s) are in both"
589 " clusters, and no merge strategy has been"
590 " supplied (see the --groups option): %s" %
593 # User wants to rename conflicts
594 elif self.groups == _GROUPS_RENAME:
595 for grp in conflicts:
596 new_name = "%s-%s" % (grp.name, other_config.GetClusterName())
597 logging.info("Renaming remote node group from %s to %s"
598 " to resolve conflict", grp.name, new_name)
601 # User wants to merge conflicting groups
602 elif self.groups == _GROUPS_MERGE:
603 for other_grp in conflicts:
604 logging.info("Merging local and remote '%s' groups", other_grp.name)
605 for node_name in other_grp.members[:]:
606 node = other_config.GetNodeInfo(node_name)
607 # Access to a protected member of a client class
608 # pylint: disable=W0212
609 other_config._UnlockedRemoveNodeFromGroup(node)
611 # Access to a protected member of a client class
612 # pylint: disable=W0212
613 my_grp_uuid = my_config._UnlockedLookupNodeGroup(other_grp.name)
615 # Access to a protected member of a client class
616 # pylint: disable=W0212
617 my_config._UnlockedAddNodeToGroup(node, my_grp_uuid)
618 node.group = my_grp_uuid
619 # Remove from list of groups to add
620 other_grps.remove(other_grp)
622 for grp in other_grps:
623 #TODO: handle node group conflicts
624 my_config.AddNodeGroup(grp, _CLUSTERMERGE_ECID)
626 # R0201: Method could be a function
627 def _StartMasterDaemon(self, no_vote=False): # pylint: disable=R0201
628 """Starts the local master daemon.
630 @param no_vote: Should the masterd started without voting? default: False
631 @raise errors.CommandError: If unable to start daemon.
636 env["EXTRA_MASTERD_ARGS"] = "--no-voting --yes-do-it"
638 result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
640 raise errors.CommandError("Couldn't start ganeti master."
641 " Fail reason: %s; output: %s" %
642 (result.fail_reason, result.output))
644 def _ReaddMergedNodesAndRedist(self):
645 """Readds all merging nodes and make sure their config is up-to-date.
647 @raise errors.CommandError: If anything fails.
650 for data in self.merger_data:
651 for node in data.nodes:
652 logging.info("Readding node %s", node)
653 result = utils.RunCmd(["gnt-node", "add", "--readd",
654 "--no-ssh-key-check", node])
656 logging.error("%s failed to be readded. Reason: %s, output: %s",
657 node, result.fail_reason, result.output)
659 result = utils.RunCmd(["gnt-cluster", "redist-conf"])
661 raise errors.CommandError("Redistribution failed. Fail reason: %s;"
662 " output: %s" % (result.fail_reason,
665 # R0201: Method could be a function
666 def _StartupAllInstances(self): # pylint: disable=R0201
667 """Starts up all instances (locally).
669 @raise errors.CommandError: If unable to start clusters
672 result = utils.RunCmd(["gnt-instance", "startup", "--all",
675 raise errors.CommandError("Unable to start all instances."
676 " Fail reason: %s; output: %s" %
677 (result.fail_reason, result.output))
679 # R0201: Method could be a function
680 # TODO: make this overridable, for some verify errors
681 def _VerifyCluster(self): # pylint: disable=R0201
682 """Runs gnt-cluster verify to verify the health.
684 @raise errors.ProgrammError: If cluster fails on verification
687 result = utils.RunCmd(["gnt-cluster", "verify"])
689 raise errors.CommandError("Verification of cluster failed."
690 " Fail reason: %s; output: %s" %
691 (result.fail_reason, result.output))
694 """Does the actual merge.
696 It runs all the steps in the right order and updates the user about steps
697 taken. Also it keeps track of rollback_steps to undo everything.
702 logging.info("Pre cluster verification")
703 self._VerifyCluster()
705 logging.info("Prepare authorized_keys")
706 rbsteps.append("Remove our key from authorized_keys on nodes:"
708 self._PrepareAuthorizedKeys()
710 rbsteps.append("Start all instances again on the merging"
711 " clusters: %(clusters)s")
712 if self.stop_instances:
713 logging.info("Stopping merging instances (takes a while)")
714 self._StopMergingInstances()
715 logging.info("Checking that no instances are running on the mergees")
716 instances_running = self._CheckRunningInstances()
717 if instances_running:
718 raise errors.CommandError("Some instances are still running on the"
720 logging.info("Disable watcher")
721 self._DisableWatcher()
722 logging.info("Merging config")
723 self._FetchRemoteConfig()
724 logging.info("Removing master IPs on mergee master nodes")
725 self._RemoveMasterIps()
726 logging.info("Stop daemons on merging nodes")
729 logging.info("Stopping master daemon")
730 self._KillMasterDaemon()
732 rbsteps.append("Restore %s from another master candidate"
733 " and restart master daemon" %
734 pathutils.CLUSTER_CONF_FILE)
736 self._StartMasterDaemon(no_vote=True)
738 # Point of no return, delete rbsteps
741 logging.warning("We are at the point of no return. Merge can not easily"
742 " be undone after this point.")
743 logging.info("Readd nodes")
744 self._ReaddMergedNodesAndRedist()
746 logging.info("Merge done, restart master daemon normally")
747 self._KillMasterDaemon()
748 self._StartMasterDaemon()
750 if self.restart == _RESTART_ALL:
751 logging.info("Starting instances again")
752 self._StartupAllInstances()
754 logging.info("Not starting instances again")
755 logging.info("Post cluster verification")
756 self._VerifyCluster()
757 except errors.GenericError, e:
761 nodes = Flatten([data.nodes for data in self.merger_data])
763 "clusters": self.clusters,
766 logging.critical("In order to rollback do the following:")
768 logging.critical(" * %s", step % info)
770 logging.critical("Nothing to rollback.")
772 # TODO: Keep track of steps done for a flawless resume?
775 """Clean up our environment.
777 This cleans up remote private keys and configs and after that
778 deletes the temporary directory.
781 shutil.rmtree(self.work_dir)
def SetupLogging(options):
  """Setting up logging infrastructure.

  Logs to stderr; verbosity is derived from the --debug/--verbose flags.

  @param options: Parsed command line options

  """
  formatter = logging.Formatter("%(asctime)s: %(levelname)s %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  # Filtering is done at handler level, so the root logger passes everything
  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)
810 program = os.path.basename(sys.argv[0])
812 parser = optparse.OptionParser(usage="%%prog [options...] <cluster...>",
814 parser.add_option(cli.DEBUG_OPT)
815 parser.add_option(cli.VERBOSE_OPT)
816 parser.add_option(PAUSE_PERIOD_OPT)
817 parser.add_option(GROUPS_OPT)
818 parser.add_option(RESTART_OPT)
819 parser.add_option(PARAMS_OPT)
820 parser.add_option(SKIP_STOP_INSTANCES_OPT)
822 (options, args) = parser.parse_args()
824 SetupLogging(options)
827 parser.error("No clusters specified")
829 cluster_merger = Merger(utils.UniqueSequence(args), options.pause_period,
830 options.groups, options.restart, options.params,
831 options.stop_instances)
834 cluster_merger.Setup()
835 cluster_merger.Merge()
836 except errors.GenericError, e:
838 return constants.EXIT_FAILURE
840 cluster_merger.Cleanup()
842 return constants.EXIT_SUCCESS
845 if __name__ == "__main__":