4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
32 from itertools import izip, islice, cycle
33 from cStringIO import StringIO
35 from ganeti import opcodes
36 from ganeti import constants
37 from ganeti import cli
38 from ganeti import errors
39 from ganeti import utils
# One-line usage synopsis; printed by Usage() and embedded in the
# OptionParser usage string in ParseOptions().
USAGE = "\tburnin -o OS_NAME [options...] instance_name ..."
class InstanceDown(Exception):
  """Raised when a checked instance turns out not to be running."""
class BurninFailure(Exception):
  """Raised when a failure is detected during the burnin run."""
  # NOTE(review): the `def Usage():` header for this function appears to be
  # on a line missing from this copy; the lines below are its body.
  """Shows program usage information and exits the program."""
  # Print the usage banner to stderr (Python 2 print-chevron syntax).
  print >> sys.stderr, "Usage:"
  print >> sys.stderr, USAGE
  # NOTE(review): the terminating sys.exit() promised by the docstring is
  # not visible here -- TODO confirm against a complete copy.
def Log(msg, indent=0):
  """Simple function that prints out its argument.
  # NOTE(review): the docstring terminator and the `headers` mapping used
  # below are on lines missing from this copy -- confirm before relying on
  # this function's exact output.
  # Two spaces per indent level, then a per-level header glyph, then msg.
  sys.stdout.write("%*s%s%s\n" % (2*indent, "",
                                  headers.get(indent, " "), msg))
def Err(msg, exit_code=1):
  """Simple error logging that prints to stderr and exits the program.

  @param msg: the error message to write to stderr
  @type exit_code: int
  @param exit_code: process exit code (defaults to 1)

  """
  sys.stderr.write(msg + "\n")
  # Flush explicitly so the message is not lost on interpreter teardown.
  sys.stderr.flush()
  # Callers treat Err() as terminal (e.g. GetState keeps using query results
  # after calling Err on failure), so this must never return.
  sys.exit(exit_code)
class SimpleOpener(urllib.FancyURLopener):
  """A simple url opener"""
  # Used by Burner._CheckInstanceAlive for the /hostname.txt liveness probe;
  # it must never block asking for credentials.
  def prompt_user_passwd(self, host, realm, clear_cache = 0):
    """No-interaction version of prompt_user_passwd."""
    # NOTE(review): the credential return value is on a line missing from
    # this copy -- presumably `return None, None`; TODO confirm.
  def http_error_default(self, url, fp, errcode, errmsg, headers):
    """Custom error handling"""
    # make sure sockets are not left in CLOSE_WAIT, this is similar
    # but with a different exception to the BasicURLOpener class
    _ = fp.read() # throw away data
    # Translate any HTTP error into InstanceDown so the liveness check
    # treats it as "instance not healthy".
    raise InstanceDown("HTTP error returned: code %s, msg %s" %
    # NOTE(review): the closing `(errcode, errmsg))` argument line appears
    # to be missing from this copy.
  # NOTE(review): the `OPTIONS = [` opener for this list appears to be on a
  # line missing from this copy, and several entries below are missing
  # their trailing `default=True),` lines.
  cli.cli_option("-o", "--os", dest="os", default=None,
                 help="OS to use during burnin",
                 completion_suggest=cli.OPT_COMPL_ONE_OS),
  cli.cli_option("--disk-size", dest="disk_size",
                 help="Disk size (determines disk count)",
                 default="128m", type="string", metavar="<size,size,...>",
                 completion_suggest=("128M 512M 1G 4G 1G,256M"
                                     " 4G,1G,1G 10G").split()),
  cli.cli_option("--disk-growth", dest="disk_growth", help="Disk growth",
                 default="128m", type="string", metavar="<size,size,...>"),
  cli.cli_option("--mem-size", dest="mem_size", help="Memory size",
                 default=128, type="unit", metavar="<size>",
                 completion_suggest=("128M 256M 512M 1G 4G 8G"
                                     " 12G 16G").split()),
  # The --no-* flags below default to True and are switched off when given.
  cli.cli_option("--no-replace1", dest="do_replace1",
                 help="Skip disk replacement with the same secondary",
                 action="store_false", default=True),
  cli.cli_option("--no-replace2", dest="do_replace2",
                 help="Skip disk replacement with a different secondary",
                 action="store_false", default=True),
  cli.cli_option("--no-failover", dest="do_failover",
                 help="Skip instance failovers", action="store_false",
  cli.cli_option("--no-migrate", dest="do_migrate",
                 help="Skip instance live migration",
                 action="store_false", default=True),
  cli.cli_option("--no-move", dest="do_move",
                 help="Skip instance moves", action="store_false",
  cli.cli_option("--no-importexport", dest="do_importexport",
                 help="Skip instance export/import", action="store_false",
  cli.cli_option("--no-startstop", dest="do_startstop",
                 help="Skip instance stop/start", action="store_false",
  cli.cli_option("--no-reinstall", dest="do_reinstall",
                 help="Skip instance reinstall", action="store_false",
  cli.cli_option("--no-reboot", dest="do_reboot",
                 help="Skip instance reboot", action="store_false",
  cli.cli_option("--no-activate-disks", dest="do_activate_disks",
                 help="Skip disk activation/deactivation",
                 action="store_false", default=True),
  cli.cli_option("--no-add-disks", dest="do_addremove_disks",
                 help="Skip disk addition/removal",
                 action="store_false", default=True),
  cli.cli_option("--no-add-nics", dest="do_addremove_nics",
                 help="Skip NIC addition/removal",
                 action="store_false", default=True),
  # --no-nics replaces the default single-NIC spec with an empty list.
  cli.cli_option("--no-nics", dest="nics",
                 help="No network interfaces", action="store_const",
                 const=[], default=[{}]),
  cli.cli_option("--rename", dest="rename", default=None,
                 help=("Give one unused instance name which is taken"
                       " to start the renaming sequence"),
                 metavar="<instance_name>"),
  cli.cli_option("-t", "--disk-template", dest="disk_template",
                 choices=list(constants.DISK_TEMPLATES),
                 default=constants.DT_DRBD8,
                 help="Disk template (diskless, file, plain or drbd) [drbd]"),
  cli.cli_option("-n", "--nodes", dest="nodes", default="",
                 help=("Comma separated list of nodes to perform"
                       " the burnin on (defaults to all nodes)"),
                 completion_suggest=cli.OPT_COMPL_MANY_NODES),
  cli.cli_option("-I", "--iallocator", dest="iallocator",
                 default=None, type="string",
                 help=("Perform the allocation using an iallocator"
                       " instead of fixed node spread (node restrictions no"
                       " longer apply, therefore -n/--nodes must not be"
                 completion_suggest=cli.OPT_COMPL_ONE_IALLOCATOR),
  cli.cli_option("-p", "--parallel", default=False, action="store_true",
                 help=("Enable parallelization of some operations in"
                       " order to speed burnin or to test granular locking")),
  cli.cli_option("--net-timeout", default=15, type="int",
                 help=("The instance check network timeout in seconds"
                       " (defaults to 15 seconds)"),
                 completion_suggest="15 60 300 900".split()),
  cli.cli_option("-C", "--http-check", default=False, action="store_true",
                 help=("Enable checking of instance status via http,"
                       " looking for /hostname.txt that should contain the"
                       " name of the instance")),
  cli.cli_option("-K", "--keep-instances", default=False,
                 dest="keep_instances",
                 help=("Leave instances on the cluster after burnin,"
                       " for investigation in case of errors or simply"
# Mainly used for bash completion
ARGUMENTS = [cli.ArgInstance(min=1)]
class Burner(object):
  # NOTE(review): the class docstring and the `def __init__(self):` header
  # appear to be on lines missing from this copy; the lines below are
  # constructor body.
    # Log to the burnin logfile and mirror everything on stderr.
    utils.SetupLogging(constants.LOG_BURNIN, debug=False, stderr_logging=True)
    # Opener used by _CheckInstanceAlive for the /hostname.txt HTTP probe.
    self.url_opener = SimpleOpener()
    # Buffer accumulating opcode feedback (see Feedback/GetFeedbackBuf).
    self._feed_buf = StringIO()
    # Whether the currently queued batch may be retried (set by StartBatch).
    self.queue_retry = False
    # Disk layout; filled in by ParseOptions.
    self.disk_count = self.disk_growth = self.disk_size = None
    # Hypervisor/backend parameter dicts -- presumably filled in by
    # ParseOptions (see the BE_MEMORY/BE_VCPUS entries there); TODO confirm.
    self.hvp = self.bep = None
    # LUXI client used to submit and poll jobs.
    self.cl = cli.GetClient()
222 def ClearFeedbackBuf(self):
223 """Clear the feedback buffer."""
224 self._feed_buf.truncate(0)
226 def GetFeedbackBuf(self):
227 """Return the contents of the buffer."""
228 return self._feed_buf.getvalue()
230 def Feedback(self, msg):
231 """Acumulate feedback in our buffer."""
232 formatted_msg = "%s %s" % (time.ctime(utils.MergeTime(msg[0])), msg[2])
233 self._feed_buf.write(formatted_msg + "\n")
234 if self.opts.verbose:
235 Log(formatted_msg, indent=3)
  def MaybeRetry(self, retry_count, msg, fn, *args):
    """Possibly retry a given function execution.

    @type retry_count: int
    @param retry_count: retry counter:
        - 0: non-retryable action
        - 1: last retry for a retryable action
        - MAX_RETRIES: original try for a retryable action
    @param msg: the kind of the operation
    @param fn: the function to be called

    NOTE(review): several lines are missing from this copy: the docstring
    terminator, the ``try:``/``rval = fn(*args)`` prologue, the re-raise
    for non-idempotent failures and the final raise/return paths.
      if retry_count > 0 and retry_count < MAX_RETRIES:
        # A retried (idempotent) call finally succeeded.
        Log("Idempotent %s succeeded after %d retries" %
            (msg, MAX_RETRIES - retry_count))
    except Exception, err:
        # retry_count == 0: the action is not retryable, abort.
        Log("Non-idempotent %s failed, aborting" % (msg, ))
      elif retry_count == 1:
        # Last permitted attempt failed as well.
        Log("Idempotent %s repeated failure, aborting" % (msg, ))
        # Otherwise log the failure and recurse with one fewer retry left.
        Log("Idempotent %s failed, retry #%d/%d: %s" %
            (msg, MAX_RETRIES - retry_count + 1, MAX_RETRIES, err))
        self.MaybeRetry(retry_count - 1, msg, fn, *args)
  def _ExecOp(self, *ops):
    """Execute one or more opcodes and manage the exec buffer.

    @result: if only one opcode has been passed, we return its result;
        otherwise we return the list of results

    NOTE(review): the docstring terminator and the final return statement
    are on lines missing from this copy.
    # Submit all opcodes as one job and poll it, routing its log output
    # into our feedback buffer.
    job_id = cli.SendJob(ops, cl=self.cl)
    results = cli.PollJob(job_id, cl=self.cl, feedback_fn=self.Feedback)
  def ExecOp(self, retry, *ops):
    """Execute one or more opcodes and manage the exec buffer.

    @result: if only one opcode has been passed, we return its result;
        otherwise we return the list of results

    NOTE(review): the computation of ``rval`` (the retry budget derived
    from ``retry``) is on lines missing from this copy.
    # Delegate to MaybeRetry so idempotent opcode sets can be re-submitted.
    return self.MaybeRetry(rval, "opcode", self._ExecOp, *ops)
  def ExecOrQueue(self, name, *ops):
    """Execute an opcode and manage the exec buffer."""
    # In parallel mode just remember the (ops, name) pair; CommitQueue
    # submits everything later as one job set.
    if self.opts.parallel:
      self.queued_ops.append((ops, name))
    # NOTE(review): the `else:` introducing the immediate-execution branch
    # is on a line missing from this copy.
      return self.ExecOp(self.queue_retry, *ops)
  def StartBatch(self, retry):
    """Start a new batch of jobs.

    @param retry: whether this is a retryable batch

    NOTE(review): the docstring terminator and the resetting of the
    queued-operations list are on lines missing from this copy.
    self.queue_retry = retry
  def CommitQueue(self):
    """Execute all submitted opcodes in case of parallel burnin"""
    # Nothing to do in serial mode.
    if not self.opts.parallel:
    # NOTE(review): several lines are missing here: the early return, the
    # retry-budget computation and the try/finally around the job set.
      results = self.MaybeRetry(rval, "jobset", self.ExecJobSet,
  def ExecJobSet(self, jobs):
    """Execute a set of jobs and return once all are done.

    The method will return the list of results, if all jobs are
    successful. Otherwise, OpExecError will be raised from within

    NOTE(review): missing lines in this copy include the docstring
    terminator, the ``results`` initialisation, the ``try:`` around the
    poll and the final return statement.
    self.ClearFeedbackBuf()
    # Submit every job first so they can run concurrently...
    job_ids = [cli.SendJob(row[0], cl=self.cl) for row in jobs]
    Log("Submitted job ID(s) %s" % ", ".join(job_ids), indent=1)
    # ...then wait for each one in turn.
    for jid, (_, iname) in zip(job_ids, jobs):
      Log("waiting for job %s for %s" % (jid, iname), indent=2)
        results.append(cli.PollJob(jid, cl=self.cl, feedback_fn=self.Feedback))
      except Exception, err:
        Log("Job for %s failed: %s" % (iname, err))
    # Fewer results than jobs means at least one job failed.
    if len(results) != len(jobs):
      raise BurninFailure()
  def _DoCheckInstances(fn):
    """Decorator for checking instances.

    After the wrapped burnin step finishes, verify that every instance is
    still alive. NOTE(review): the docstring terminator and the wrapper's
    return statements are on lines missing from this copy.
    def wrapper(self, *args, **kwargs):
      val = fn(self, *args, **kwargs)
      for instance in self.instances:
        self._CheckInstanceAlive(instance)
    """Decorator for possible batch operations.

    Must come after the _DoCheckInstances decorator (if any).

    @param retry: whether this is a retryable batch, will be
        passed on to StartBatch

    NOTE(review): the `def _DoBatch(retry):` header, the docstring
    terminator and the CommitQueue/return epilogue are on lines missing
    from this copy.
    def batched(self, *args, **kwargs):
      self.StartBatch(retry)
      val = fn(self, *args, **kwargs)
  def ParseOptions(self):
    """Parses the command line options.

    In case of command line errors, it will show the usage and exit the
    program. NOTE(review): many lines of this method are missing in this
    copy (docstring terminator, Usage() calls, parser arguments and the
    hvp/bep dict assignments); review against a complete copy.
    parser = optparse.OptionParser(usage="\n%s" % USAGE,
                                   version=("%%prog (ganeti) %s" %
                                            constants.RELEASE_VERSION),
    options, args = parser.parse_args()
    # At least one instance name and an OS are mandatory.
    if len(args) < 1 or options.os is None:
    supported_disk_templates = (constants.DT_DISKLESS,
    if options.disk_template not in supported_disk_templates:
      Err("Unknown disk template '%s'" % options.disk_template)
    if options.disk_template == constants.DT_DISKLESS:
      # Diskless instances have no disks to grow or add/remove.
      disk_size = disk_growth = []
      options.do_addremove_disks = False
    # NOTE(review): the `else:` for the disk-ful branch is missing here.
      disk_size = [utils.ParseUnit(v) for v in options.disk_size.split(",")]
      disk_growth = [utils.ParseUnit(v)
                     for v in options.disk_growth.split(",")]
      # Each disk needs its own growth amount.
      if len(disk_growth) != len(disk_size):
        Err("Wrong disk sizes/growth combination")
    if ((disk_size and options.disk_template == constants.DT_DISKLESS) or
        (not disk_size and options.disk_template != constants.DT_DISKLESS)):
      Err("Wrong disk count/disk template combination")
    self.disk_size = disk_size
    self.disk_growth = disk_growth
    self.disk_count = len(disk_size)
    # Node list and iallocator are mutually exclusive placement methods.
    if options.nodes and options.iallocator:
      Err("Give either the nodes option or the iallocator option, not both")
    self.instances = args
    # NOTE(review): the surrounding dict assignment (presumably self.bep)
    # is on missing lines.
        constants.BE_MEMORY: options.mem_size,
        constants.BE_VCPUS: 1,
    # Apply the network timeout globally for the liveness HTTP checks.
    socket.setdefaulttimeout(options.net_timeout)
    """Read the cluster state from the config."""
    # NOTE(review): the method header (`def GetState(self):`) and several
    # body lines (the branch guarding this split, the all-nodes fallback
    # and the `try:` around the query) are missing from this copy.
      names = self.opts.nodes.split(",")
      # Query the node list, asking for offline/drained status as well.
      op = opcodes.OpQueryNodes(output_fields=["name", "offline", "drained"],
                                names=names, use_locking=True)
      result = self.ExecOp(True, op)
    except errors.GenericError, err:
      err_code, msg = cli.FormatError(err)
      Err(msg, exit_code=err_code)
    # Keep only nodes that are neither offline (data[1]) nor drained
    # (data[2]).
    self.nodes = [data[0] for data in result if not (data[1] or data[2])]
    # Ask for the list of OS definitions and their validity.
    op_diagos = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
    result = self.ExecOp(True, op_diagos)
    # NOTE(review): the `if not result:` guard is on a missing line.
      Err("Can't get the OS list")
    # filter non-valid OS-es
    os_set = [val[0] for val in result if val[1]]
    if self.opts.os not in os_set:
      Err("OS '%s' not found" % self.opts.os)
  def BurnCreateInstances(self):
    """Create the given instances.

    NOTE(review): this method is missing several lines in this copy (the
    docstring terminator, part of the ``mytor`` expression, the ``else:``
    of the placement-message chain and most OpCreateInstance keyword
    arguments).
    # Round-robin (primary, secondary) node pairs across the instance list.
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
    Log("Creating instances")
    for pnode, snode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      if self.opts.iallocator:
        # Placement is delegated to the iallocator.
        msg = "with iallocator %s" % self.opts.iallocator
      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
        # Non-mirrored templates need only a primary node.
        msg = "on %s" % pnode
        msg = "on %s, %s" % (pnode, snode)
      op = opcodes.OpCreateInstance(instance_name=instance,
                                    disks = [ {"size": size}
                                              for size in self.disk_size],
                                    disk_template=self.opts.disk_template,
                                    mode=constants.INSTANCE_CREATE,
                                    os_type=self.opts.os,
                                    file_storage_dir=None,
                                    iallocator=self.opts.iallocator,
      self.ExecOrQueue(instance, op)
      # Remember the instance for later cleanup in BurnRemove.
      self.to_rem.append(instance)
  def BurnGrowDisks(self):
    """Grow both the os and the swap disks by the requested amount, if any."""
    # NOTE(review): missing lines here appear to include the leading Log()
    # call and the `if growth > 0:` guard around the opcode.
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      for idx, growth in enumerate(self.disk_growth):
          # Grow disk `idx` by the configured amount, waiting for resync.
          op = opcodes.OpGrowDisk(instance_name=instance, disk=idx,
                                  amount=growth, wait_for_sync=True)
          Log("increase disk/%s by %s MB" % (idx, growth), indent=2)
          self.ExecOrQueue(instance, op)
  def BurnReplaceDisks1D8(self):
    """Replace disks on primary and secondary for drbd8."""
    Log("Replacing disks on the same nodes")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      # NOTE(review): the `ops` list initialisation, the `mode=mode`
      # argument and the `ops.append(op)` lines are missing in this copy.
      for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI:
        op = opcodes.OpReplaceDisks(instance_name=instance,
                                    disks=[i for i in range(self.disk_count)])
        Log("run %s" % mode, indent=2)
      # Run the secondary and primary replacements as one job.
      self.ExecOrQueue(instance, *ops)
  def BurnReplaceDisks2(self):
    """Replace secondary node."""
    Log("Changing the secondary node")
    mode = constants.REPLACE_DISK_CHG
    # Pick a replacement node two steps ahead in the node rotation.
    mytor = izip(islice(cycle(self.nodes), 2, None),
    for tnode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      if self.opts.iallocator:
        # The new secondary is chosen by the iallocator.
        msg = "with iallocator %s" % self.opts.iallocator
      # NOTE(review): the else-branch (explicit tnode selection) and
      # several OpReplaceDisks arguments are on missing lines.
      op = opcodes.OpReplaceDisks(instance_name=instance,
                                  iallocator=self.opts.iallocator,
      Log("run %s %s" % (mode, msg), indent=2)
      self.ExecOrQueue(instance, op)
562 def BurnFailover(self):
563 """Failover the instances."""
564 Log("Failing over instances")
565 for instance in self.instances:
566 Log("instance %s" % instance, indent=1)
567 op = opcodes.OpFailoverInstance(instance_name=instance,
568 ignore_consistency=False)
569 self.ExecOrQueue(instance, op)
    """Move the instances."""
    # NOTE(review): the `def BurnMove(self):` header and its decorators are
    # on lines missing from this copy, as are part of the ``mytor``
    # expression and the OpMoveInstance target-node argument.
    Log("Moving instances")
    # Move each instance to the next node in the rotation.
    mytor = izip(islice(cycle(self.nodes), 1, None),
    for tnode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      op = opcodes.OpMoveInstance(instance_name=instance,
      self.ExecOrQueue(instance, op)
  def BurnMigrate(self):
    """Migrate the instances."""
    Log("Migrating instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      # NOTE(review): the closing arguments of both OpMigrateInstance calls
      # (presumably the cleanup flags) are on missing lines.
      op1 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
      op2 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
      Log("migration and migration cleanup", indent=2)
      # Run the migration and its cleanup as one job.
      self.ExecOrQueue(instance, op1, op2)
  def BurnImportExport(self):
    """Export the instance, delete it, and import it back.

    NOTE(review): many lines of this method are missing in this copy (the
    docstring terminator, part of ``mytor``, the placement-message
    else-branch and most OpExportInstance/OpCreateInstance arguments).
    Log("Exporting and re-importing instances")
    # Rotate (primary, secondary, export) node triples per instance.
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
                 islice(cycle(self.nodes), 2, None),
    for pnode, snode, enode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      # read the full name of the instance
      nam_op = opcodes.OpQueryInstances(output_fields=["name"],
                                        names=[instance], use_locking=True)
      full_name = self.ExecOp(False, nam_op)[0][0]
      if self.opts.iallocator:
        import_log_msg = ("import from %s"
                          " with iallocator %s" %
                          (enode, self.opts.iallocator))
      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
        import_log_msg = ("import from %s to %s" %
        import_log_msg = ("import from %s to %s, %s" %
                          (enode, pnode, snode))
      exp_op = opcodes.OpExportInstance(instance_name=instance,
      rem_op = opcodes.OpRemoveInstance(instance_name=instance,
                                        ignore_failures=True)
      # The export lands under EXPORT_DIR/<full name> on the export node.
      imp_dir = os.path.join(constants.EXPORT_DIR, full_name)
      imp_op = opcodes.OpCreateInstance(instance_name=instance,
                                        disks = [ {"size": size}
                                                  for size in self.disk_size],
                                        disk_template=self.opts.disk_template,
                                        mode=constants.INSTANCE_IMPORT,
                                        file_storage_dir=None,
                                        iallocator=self.opts.iallocator,
      erem_op = opcodes.OpRemoveExport(instance_name=instance)
      Log("export to node %s" % enode, indent=2)
      Log("remove instance", indent=2)
      Log(import_log_msg, indent=2)
      Log("remove export", indent=2)
      # Chain all four steps as a single job per instance.
      self.ExecOrQueue(instance, exp_op, rem_op, imp_op, erem_op)
664 def StopInstanceOp(self, instance):
665 """Stop given instance."""
666 return opcodes.OpShutdownInstance(instance_name=instance)
668 def StartInstanceOp(self, instance):
669 """Start given instance."""
670 return opcodes.OpStartupInstance(instance_name=instance, force=False)
672 def RenameInstanceOp(self, instance, instance_new):
673 """Rename instance."""
674 return opcodes.OpRenameInstance(instance_name=instance,
675 new_name=instance_new)
679 def BurnStopStart(self):
680 """Stop/start the instances."""
681 Log("Stopping and starting instances")
682 for instance in self.instances:
683 Log("instance %s" % instance, indent=1)
684 op1 = self.StopInstanceOp(instance)
685 op2 = self.StartInstanceOp(instance)
686 self.ExecOrQueue(instance, op1, op2)
689 def BurnRemove(self):
690 """Remove the instances."""
691 Log("Removing instances")
692 for instance in self.to_rem:
693 Log("instance %s" % instance, indent=1)
694 op = opcodes.OpRemoveInstance(instance_name=instance,
695 ignore_failures=True)
696 self.ExecOrQueue(instance, op)
698 def BurnRename(self):
699 """Rename the instances.
701 Note that this function will not execute in parallel, since we
702 only have one target for rename.
705 Log("Renaming instances")
706 rename = self.opts.rename
707 for instance in self.instances:
708 Log("instance %s" % instance, indent=1)
709 op_stop1 = self.StopInstanceOp(instance)
710 op_stop2 = self.StopInstanceOp(rename)
711 op_rename1 = self.RenameInstanceOp(instance, rename)
712 op_rename2 = self.RenameInstanceOp(rename, instance)
713 op_start1 = self.StartInstanceOp(rename)
714 op_start2 = self.StartInstanceOp(instance)
715 self.ExecOp(False, op_stop1, op_rename1, op_start1)
716 self._CheckInstanceAlive(rename)
717 self.ExecOp(False, op_stop2, op_rename2, op_start2)
718 self._CheckInstanceAlive(instance)
722 def BurnReinstall(self):
723 """Reinstall the instances."""
724 Log("Reinstalling instances")
725 for instance in self.instances:
726 Log("instance %s" % instance, indent=1)
727 op1 = self.StopInstanceOp(instance)
728 op2 = opcodes.OpReinstallInstance(instance_name=instance)
729 Log("reinstall without passing the OS", indent=2)
730 op3 = opcodes.OpReinstallInstance(instance_name=instance,
731 os_type=self.opts.os)
732 Log("reinstall specifying the OS", indent=2)
733 op4 = self.StartInstanceOp(instance)
734 self.ExecOrQueue(instance, op1, op2, op3, op4)
  def BurnReboot(self):
    """Reboot the instances."""
    Log("Rebooting instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      # NOTE(review): the `ops` list initialisation and `ops.append(op)`
      # lines are missing from this copy.
      for reboot_type in constants.REBOOT_TYPES:
        # One reboot opcode per supported reboot type.
        op = opcodes.OpRebootInstance(instance_name=instance,
                                      reboot_type=reboot_type,
                                      ignore_secondaries=False)
        Log("reboot with type '%s'" % reboot_type, indent=2)
      # All reboot types run back-to-back as a single job.
      self.ExecOrQueue(instance, *ops)
754 def BurnActivateDisks(self):
755 """Activate and deactivate disks of the instances."""
756 Log("Activating/deactivating disks")
757 for instance in self.instances:
758 Log("instance %s" % instance, indent=1)
759 op_start = self.StartInstanceOp(instance)
760 op_act = opcodes.OpActivateInstanceDisks(instance_name=instance)
761 op_deact = opcodes.OpDeactivateInstanceDisks(instance_name=instance)
762 op_stop = self.StopInstanceOp(instance)
763 Log("activate disks when online", indent=2)
764 Log("activate disks when offline", indent=2)
765 Log("deactivate disks (when offline)", indent=2)
766 self.ExecOrQueue(instance, op_act, op_stop, op_act, op_deact, op_start)
770 def BurnAddRemoveDisks(self):
771 """Add and remove an extra disk for the instances."""
772 Log("Adding and removing disks")
773 for instance in self.instances:
774 Log("instance %s" % instance, indent=1)
775 op_add = opcodes.OpSetInstanceParams(\
776 instance_name=instance,
777 disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
778 op_rem = opcodes.OpSetInstanceParams(\
779 instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
780 op_stop = self.StopInstanceOp(instance)
781 op_start = self.StartInstanceOp(instance)
782 Log("adding a disk", indent=2)
783 Log("removing last disk", indent=2)
784 self.ExecOrQueue(instance, op_add, op_stop, op_rem, op_start)
787 def BurnAddRemoveNICs(self):
788 """Add and remove an extra NIC for the instances."""
789 Log("Adding and removing NICs")
790 for instance in self.instances:
791 Log("instance %s" % instance, indent=1)
792 op_add = opcodes.OpSetInstanceParams(\
793 instance_name=instance, nics=[(constants.DDM_ADD, {})])
794 op_rem = opcodes.OpSetInstanceParams(\
795 instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
796 Log("adding a NIC", indent=2)
797 Log("removing last NIC", indent=2)
798 self.ExecOrQueue(instance, op_add, op_rem)
  def _CheckInstanceAlive(self, instance):
    """Check if an instance is alive by doing http checks.

    This will try to retrieve the url on the instance /hostname.txt
    and check that it contains the hostname of the instance. In case
    we get ECONNREFUSED, we retry up to the net timeout seconds, for
    any other error we abort.

    NOTE(review): several lines are missing in this copy: the docstring
    terminator, the early return, the ``url = None`` initialisation, the
    try/except around the open, the retry sleep and the url.close().
    # Checks are optional; without --http-check this is a no-op.
    if not self.opts.http_check:
    end_time = time.time() + self.opts.net_timeout
    while time.time() < end_time and url is None:
        url = self.url_opener.open("http://%s/hostname.txt" % instance)
        # here we can have connection refused, no route to host, etc.
    # Ran out of time without a successful fetch.
    raise InstanceDown(instance, "Cannot contact instance")
      hostname = url.read().strip()
    # The instance must report its own name in /hostname.txt.
    if hostname != instance:
      raise InstanceDown(instance, ("Hostname mismatch, expected %s, got %s" %
                                    (instance, hostname)))
  def BurninCluster(self):
    """Test a cluster intensively.

    This will create instances and then start/stop/failover them.
    It is safe for existing instances but could impact performance.

    NOTE(review): this method is heavily elided in this copy -- the
    docstring terminator, the `opts = self.opts` binding, the try/finally
    skeleton and several Burn* step invocations are on missing lines.
    Log("Testing global parameters")
    # A single-node cluster cannot host network-mirrored disk templates.
    if (len(self.nodes) == 1 and
        opts.disk_template not in (constants.DT_DISKLESS, constants.DT_PLAIN,
      Err("When one node is available/selected the disk template must"
          " be 'diskless', 'file' or 'plain'")
      self.BurnCreateInstances()
      # Disk replacement needs a network-mirrored template.
      if opts.do_replace1 and opts.disk_template in constants.DTS_NET_MIRROR:
        self.BurnReplaceDisks1D8()
      # Secondary replacement additionally needs a third node.
      if (opts.do_replace2 and len(self.nodes) > 2 and
          opts.disk_template in constants.DTS_NET_MIRROR) :
        self.BurnReplaceDisks2()
      # Grow disks only when a positive growth was requested.
      if (opts.disk_template != constants.DT_DISKLESS and
          utils.any(self.disk_growth, lambda n: n > 0)):
      if opts.do_failover and opts.disk_template in constants.DTS_NET_MIRROR:
      # Live migration is drbd-only.
      if opts.do_migrate and opts.disk_template == constants.DT_DRBD8:
      if opts.do_move and opts.disk_template in [constants.DT_PLAIN,
      if (opts.do_importexport and
          opts.disk_template not in (constants.DT_DISKLESS,
        self.BurnImportExport()
      if opts.do_reinstall:
      if opts.do_addremove_disks:
        self.BurnAddRemoveDisks()
      if opts.do_addremove_nics:
        self.BurnAddRemoveNICs()
      if opts.do_activate_disks:
        self.BurnActivateDisks()
      if opts.do_startstop:
      # On failure, dump the accumulated opcode feedback for diagnosis.
      Log("Error detected: opcode buffer follows:\n\n")
      Log(self.GetFeedbackBuf())
      # Leave the instances behind only when --keep-instances was given.
      if not self.opts.keep_instances:
    except Exception, err:
      if has_err: # already detected errors, so errors in removal
      # NOTE(review): the branch bodies and re-raise here are on missing
      # lines.
        Log("Note: error detected during instance remove: %s" % str(err))
      else: # non-expected error
  # NOTE(review): this is the tail of a `main()` whose header is on missing
  # lines; it runs the whole burnin and propagates its return code.
  return burner.BurninCluster()
# Script entry point (the actual call to main() is on a missing line).
if __name__ == "__main__":