4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
32 from itertools import izip, islice, cycle
33 from cStringIO import StringIO
35 from ganeti import opcodes
36 from ganeti import constants
37 from ganeti import cli
38 from ganeti import errors
39 from ganeti import utils
# Command-line usage banner printed by Usage(); -o OS_NAME is mandatory and
# at least one instance name must follow the options.
USAGE = ("\tburnin -o OS_NAME [options...] instance_name ...")
class InstanceDown(Exception):
  """Raised when an instance that should be running is found down."""
class BurninFailure(Exception):
  """Signals that an error was detected while burning the cluster."""
  """Shows program usage information and exits the program."""
  # NOTE(review): the enclosing `def Usage():` header and the terminal
  # sys.exit() are in elided lines — confirm against the full file.
  print >> sys.stderr, "Usage:"
  print >> sys.stderr, USAGE
# Thin stdout logger used throughout burnin.  `headers` is defined in elided
# lines — presumably a dict mapping indent level to a bullet prefix; the
# docstring close is also elided.  TODO confirm against the full file.
def Log(msg, indent=0):
  """Simple function that prints out its argument.
  sys.stdout.write("%*s%s%s\n" % (2*indent, "",
                                  headers.get(indent, " "), msg))
# Error reporter: writes msg to stderr; the elided tail presumably calls
# sys.exit(exit_code) — TODO confirm against the full file.
def Err(msg, exit_code=1):
  """Simple error logging that prints to stderr.
  sys.stderr.write(msg + "\n")
class SimpleOpener(urllib.FancyURLopener):
  """A simple url opener"""
  # Non-interactive opener used by the instance HTTP liveness check.

  # NOTE(review): the return statement (presumably returning no credentials)
  # is in elided lines — confirm against the full file.
  def prompt_user_passwd(self, host, realm, clear_cache = 0):
    """No-interaction version of prompt_user_passwd."""

  def http_error_default(self, url, fp, errcode, errmsg, headers):
    """Custom error handling"""
    # make sure sockets are not left in CLOSE_WAIT, this is similar
    # but with a different exception to the BasicURLOpener class
    _ = fp.read() # throw away data
    # NOTE(review): fp.close() presumably happens in an elided line before
    # the raise; the raise's argument continuation is also elided.
    raise InstanceDown("HTTP error returned: code %s, msg %s" %
  # NOTE(review): this is the interior of the module-level OPTIONS list; the
  # `OPTIONS = [` opener, several entries and some `default=...)` closers are
  # in elided lines.  Each cli_option toggles or parameterises one burnin
  # step; the --no-* flags default to True and store False when given.
  cli.cli_option("-o", "--os", dest="os", default=None,
                 help="OS to use during burnin",
                 completion_suggest=cli.OPT_COMPL_ONE_OS),
  cli.cli_option("--disk-size", dest="disk_size",
                 help="Disk size (determines disk count)",
                 default="128m", type="string", metavar="<size,size,...>",
                 completion_suggest=("128M 512M 1G 4G 1G,256M"
                                     " 4G,1G,1G 10G").split()),
  cli.cli_option("--disk-growth", dest="disk_growth", help="Disk growth",
                 default="128m", type="string", metavar="<size,size,...>"),
  cli.cli_option("--mem-size", dest="mem_size", help="Memory size",
                 default=128, type="unit", metavar="<size>",
                 completion_suggest=("128M 256M 512M 1G 4G 8G"
                                     " 12G 16G").split()),
  cli.cli_option("--no-replace1", dest="do_replace1",
                 help="Skip disk replacement with the same secondary",
                 action="store_false", default=True),
  cli.cli_option("--no-replace2", dest="do_replace2",
                 help="Skip disk replacement with a different secondary",
                 action="store_false", default=True),
  cli.cli_option("--no-failover", dest="do_failover",
                 help="Skip instance failovers", action="store_false",
  cli.cli_option("--no-migrate", dest="do_migrate",
                 help="Skip instance live migration",
                 action="store_false", default=True),
  cli.cli_option("--no-move", dest="do_move",
                 help="Skip instance moves", action="store_false",
  cli.cli_option("--no-importexport", dest="do_importexport",
                 help="Skip instance export/import", action="store_false",
  cli.cli_option("--no-startstop", dest="do_startstop",
                 help="Skip instance stop/start", action="store_false",
  cli.cli_option("--no-reinstall", dest="do_reinstall",
                 help="Skip instance reinstall", action="store_false",
  cli.cli_option("--no-reboot", dest="do_reboot",
                 help="Skip instance reboot", action="store_false",
  cli.cli_option("--no-activate-disks", dest="do_activate_disks",
                 help="Skip disk activation/deactivation",
                 action="store_false", default=True),
  cli.cli_option("--no-add-disks", dest="do_addremove_disks",
                 help="Skip disk addition/removal",
                 action="store_false", default=True),
  cli.cli_option("--no-add-nics", dest="do_addremove_nics",
                 help="Skip NIC addition/removal",
                 action="store_false", default=True),
  cli.cli_option("--no-nics", dest="nics",
                 help="No network interfaces", action="store_const",
                 const=[], default=[{}]),
  cli.cli_option("--rename", dest="rename", default=None,
                 help=("Give one unused instance name which is taken"
                       " to start the renaming sequence"),
                 metavar="<instance_name>"),
  cli.cli_option("-t", "--disk-template", dest="disk_template",
                 choices=list(constants.DISK_TEMPLATES),
                 default=constants.DT_DRBD8,
                 help="Disk template (diskless, file, plain or drbd) [drbd]"),
  cli.cli_option("-n", "--nodes", dest="nodes", default="",
                 help=("Comma separated list of nodes to perform"
                       " the burnin on (defaults to all nodes)"),
                 completion_suggest=cli.OPT_COMPL_MANY_NODES),
  cli.cli_option("-I", "--iallocator", dest="iallocator",
                 default=None, type="string",
                 help=("Perform the allocation using an iallocator"
                       " instead of fixed node spread (node restrictions no"
                       " longer apply, therefore -n/--nodes must not be"
                 completion_suggest=cli.OPT_COMPL_ONE_IALLOCATOR),
  cli.cli_option("-p", "--parallel", default=False, action="store_true",
                 help=("Enable parallelization of some operations in"
                       " order to speed burnin or to test granular locking")),
  cli.cli_option("--net-timeout", default=15, type="int",
                 help=("The instance check network timeout in seconds"
                       " (defaults to 15 seconds)"),
                 completion_suggest="15 60 300 900".split()),
  cli.cli_option("-C", "--http-check", default=False, action="store_true",
                 help=("Enable checking of instance status via http,"
                       " looking for /hostname.txt that should contain the"
                       " name of the instance")),
  cli.cli_option("-K", "--keep-instances", default=False,
                 dest="keep_instances",
                 help=("Leave instances on the cluster after burnin,"
                       " for investigation in case of errors or simply"

# Mainly used for bash completion
ARGUMENTS = [cli.ArgInstance(min=1)]
class Burner(object):
  # NOTE(review): the class docstring, the `def __init__(self):` header and
  # several attribute initialisations (opts, instances, to_rem, queued_ops,
  # hv_list, ...) are in elided lines; the statements below are the visible
  # tail of the constructor.
    utils.SetupLogging(constants.LOG_BURNIN, debug=False, stderr_logging=True)
    self.url_opener = SimpleOpener()
    self._feed_buf = StringIO()
    self.queue_retry = False
    self.disk_count = self.disk_growth = self.disk_size = None
    self.hvp = self.bep = None
    self.cl = cli.GetClient()
222 def ClearFeedbackBuf(self):
223 """Clear the feedback buffer."""
224 self._feed_buf.truncate(0)
226 def GetFeedbackBuf(self):
227 """Return the contents of the buffer."""
228 return self._feed_buf.getvalue()
230 def Feedback(self, msg):
231 """Acumulate feedback in our buffer."""
232 formatted_msg = "%s %s" % (time.ctime(utils.MergeTime(msg[0])), msg[2])
233 self._feed_buf.write(formatted_msg + "\n")
234 if self.opts.verbose:
235 Log(formatted_msg, indent=3)
  # NOTE(review): the docstring close, the `try:` wrapping fn(*args), the
  # `raise` statements and the `else:` branches are in elided lines — the
  # retry control flow below is only partially visible.  MAX_RETRIES is a
  # module-level constant defined in elided lines.
  def MaybeRetry(self, retry_count, msg, fn, *args):
    """Possibly retry a given function execution.

    @type retry_count: int
    @param retry_count: retry counter:
        - 0: non-retryable action
        - 1: last retry for a retryable action
        - MAX_RETRIES: original try for a retryable action
    @param msg: the kind of the operation
    @param fn: the function to be called
      if retry_count > 0 and retry_count < MAX_RETRIES:
        Log("Idempotent %s succeeded after %d retries" %
            (msg, MAX_RETRIES - retry_count))
    except Exception, err:
        Log("Non-idempotent %s failed, aborting" % (msg, ))
      elif retry_count == 1:
        Log("Idempotent %s repeated failure, aborting" % (msg, ))
        Log("Idempotent %s failed, retry #%d/%d: %s" %
            (msg, MAX_RETRIES - retry_count + 1, MAX_RETRIES, err))
        self.MaybeRetry(retry_count - 1, msg, fn, *args)
  # NOTE(review): the docstring close and the return statement (presumably
  # unpacking a single-element result list) are in elided lines.
  def _ExecOp(self, *ops):
    """Execute one or more opcodes and manage the exec buffer.

    @result: if only opcode has been passed, we return its result;
        otherwise we return the list of results
    job_id = cli.SendJob(ops, cl=self.cl)
    results = cli.PollJob(job_id, cl=self.cl, feedback_fn=self.Feedback)
  # NOTE(review): the docstring close and the computation of `rval` (derived
  # from `retry`, presumably 0 or MAX_RETRIES) are in elided lines.
  def ExecOp(self, retry, *ops):
    """Execute one or more opcodes and manage the exec buffer.

    @result: if only opcode has been passed, we return its result;
        otherwise we return the list of results
    return self.MaybeRetry(rval, "opcode", self._ExecOp, *ops)
  def ExecOrQueue(self, name, *ops):
    """Execute an opcode and manage the exec buffer."""
    # In parallel mode, opcodes are queued per instance name and only
    # submitted later by CommitQueue; otherwise run immediately.
    if self.opts.parallel:
      self.queued_ops.append((ops, name))
      # NOTE(review): the `else:` introducing this branch is in an elided
      # line.
      return self.ExecOp(self.queue_retry, *ops)
  # NOTE(review): the docstring close and the reset of the ops queue are in
  # elided lines.
  def StartBatch(self, retry):
    """Start a new batch of jobs.

    @param retry: whether this is a retryable batch
    self.queue_retry = retry
312 def CommitQueue(self):
313 """Execute all submitted opcodes in case of parallel burnin"""
314 if not self.opts.parallel:
323 results = self.MaybeRetry(rval, "jobset", self.ExecJobSet,
  # NOTE(review): the docstring close, the `results = []` initialisation, the
  # `try:` around PollJob and the final `return results` are in elided lines.
  def ExecJobSet(self, jobs):
    """Execute a set of jobs and return once all are done.

    The method will return the list of results, if all jobs are
    successful. Otherwise, OpExecError will be raised from within
    self.ClearFeedbackBuf()
    job_ids = [cli.SendJob(row[0], cl=self.cl) for row in jobs]
    Log("Submitted job ID(s) %s" % ", ".join(job_ids), indent=1)
    for jid, (_, iname) in zip(job_ids, jobs):
      Log("waiting for job %s for %s" % (jid, iname), indent=2)
        results.append(cli.PollJob(jid, cl=self.cl, feedback_fn=self.Feedback))
      except Exception, err:
        Log("Job for %s failed: %s" % (iname, err))
    # Fewer results than jobs means at least one PollJob raised.
    if len(results) != len(jobs):
      raise BurninFailure()
  # NOTE(review): the docstring close, the `return val` and the decorator
  # bookkeeping are in elided lines.  This is a decorator defined inside the
  # class body (no `self` parameter — it wraps sibling methods).
  def _DoCheckInstances(fn):
    """Decorator for checking instances.

    def wrapper(self, *args, **kwargs):
      val = fn(self, *args, **kwargs)
      # After the wrapped burn step, verify every instance is still alive.
      for instance in self.instances:
        self._CheckInstanceAlive(instance)
    # NOTE(review): the enclosing `def _DoBatch(retry):` header, the
    # docstring close, the CommitQueue call and the returns are in elided
    # lines — this is the visible interior of a parameterised decorator.
    """Decorator for possible batch operations.

    Must come after the _DoCheckInstances decorator (if any).

    @param retry: whether this is a retryable batch, will be
    def batched(self, *args, **kwargs):
      self.StartBatch(retry)
      val = fn(self, *args, **kwargs)
  # NOTE(review): the docstring close, the `parser.add_options` / option-list
  # wiring, the Usage() calls on bad input, the `else:` before the disk-size
  # parsing, and the `self.bep = {` / `self.hvp = {}` openers are in elided
  # lines — confirm against the full file.
  def ParseOptions(self):
    """Parses the command line options.

    In case of command line errors, it will show the usage and exit the
    parser = optparse.OptionParser(usage="\n%s" % USAGE,
                                   version=("%%prog (ganeti) %s" %
                                            constants.RELEASE_VERSION),
    options, args = parser.parse_args()
    if len(args) < 1 or options.os is None:
    supported_disk_templates = (constants.DT_DISKLESS,
    if options.disk_template not in supported_disk_templates:
      Err("Unknown disk template '%s'" % options.disk_template)
    if options.disk_template == constants.DT_DISKLESS:
      # Diskless instances cannot grow or add/remove disks.
      disk_size = disk_growth = []
      options.do_addremove_disks = False
      disk_size = [utils.ParseUnit(v) for v in options.disk_size.split(",")]
      disk_growth = [utils.ParseUnit(v)
                     for v in options.disk_growth.split(",")]
      if len(disk_growth) != len(disk_size):
        Err("Wrong disk sizes/growth combination")
    if ((disk_size and options.disk_template == constants.DT_DISKLESS) or
        (not disk_size and options.disk_template != constants.DT_DISKLESS)):
      Err("Wrong disk count/disk template combination")

    self.disk_size = disk_size
    self.disk_growth = disk_growth
    self.disk_count = len(disk_size)

    if options.nodes and options.iallocator:
      Err("Give either the nodes option or the iallocator option, not both")
    self.instances = args
      constants.BE_MEMORY: options.mem_size,
      constants.BE_VCPUS: 1,
    # Applies globally so the HTTP liveness checks time out.
    socket.setdefaulttimeout(options.net_timeout)
    # NOTE(review): the enclosing `def GetState(self):` header, the `try:`
    # before OpQueryNodes, the `if not result:` before the OS-list error, and
    # the found/else handling around the OS search loop are in elided lines.
    """Read the cluster state from the config."""
    names = self.opts.nodes.split(",")
      op = opcodes.OpQueryNodes(output_fields=["name", "offline", "drained"],
                                names=names, use_locking=True)
      result = self.ExecOp(True, op)
    except errors.GenericError, err:
      err_code, msg = cli.FormatError(err)
      Err(msg, exit_code=err_code)
    # Keep only nodes that are neither offline nor drained.
    self.nodes = [data[0] for data in result if not (data[1] or data[2])]
    op_diagnose = opcodes.OpDiagnoseOS(output_fields=["name", "valid",
                                                      "variants"], names=[])
    result = self.ExecOp(True, op_diagnose)
      Err("Can't get the OS list")
    for (name, valid, variants) in result:
      if valid and self.opts.os in cli.CalculateOSNames(name, variants):
      Err("OS '%s' not found" % self.opts.os)
  # NOTE(review): the docstring close, the third izip iterable (presumably
  # self.instances), the `else:` branches setting pnode/snode vs iallocator,
  # and several OpCreateInstance keyword arguments are in elided lines.
  def BurnCreateInstances(self):
    """Create the given instances.

    # Round-robin (pnode, snode) pairs over the node list.
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
    Log("Creating instances")
    for pnode, snode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      if self.opts.iallocator:
        msg = "with iallocator %s" % self.opts.iallocator
      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
        msg = "on %s" % pnode
        msg = "on %s, %s" % (pnode, snode)
      op = opcodes.OpCreateInstance(instance_name=instance,
                                    disks = [ {"size": size}
                                              for size in self.disk_size],
                                    disk_template=self.opts.disk_template,
                                    mode=constants.INSTANCE_CREATE,
                                    os_type=self.opts.os,
                                    file_storage_dir=None,
                                    iallocator=self.opts.iallocator,
      self.ExecOrQueue(instance, op)
      # Track created instances so BurnRemove can clean them up.
      self.to_rem.append(instance)
  # NOTE(review): the "Growing disks" Log call and the `if growth > 0:`
  # guard around OpGrowDisk are in elided lines.
  def BurnGrowDisks(self):
    """Grow both the os and the swap disks by the requested amount, if any."""
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      for idx, growth in enumerate(self.disk_growth):
        op = opcodes.OpGrowDisk(instance_name=instance, disk=idx,
                                amount=growth, wait_for_sync=True)
        Log("increase disk/%s by %s MB" % (idx, growth), indent=2)
        self.ExecOrQueue(instance, op)
  # NOTE(review): the `ops = []` initialisation, the `mode=mode,` keyword
  # line and the `ops.append(op)` are in elided lines.
  def BurnReplaceDisks1D8(self):
    """Replace disks on primary and secondary for drbd8."""
    Log("Replacing disks on the same nodes")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      # One replace-disks opcode per mode (secondary first, then primary).
      for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI:
        op = opcodes.OpReplaceDisks(instance_name=instance,
                                    disks=[i for i in range(self.disk_count)])
        Log("run %s" % mode, indent=2)
      self.ExecOrQueue(instance, *ops)
  # NOTE(review): the second izip iterable, the `else:` branch setting a
  # plain node message, and the remote_node/mode/disks keyword lines of
  # OpReplaceDisks are in elided lines.
  def BurnReplaceDisks2(self):
    """Replace secondary node."""
    Log("Changing the secondary node")
    mode = constants.REPLACE_DISK_CHG

    # Pair each instance with a node two positions ahead as new secondary.
    mytor = izip(islice(cycle(self.nodes), 2, None),
    for tnode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      if self.opts.iallocator:
        msg = "with iallocator %s" % self.opts.iallocator
      op = opcodes.OpReplaceDisks(instance_name=instance,
                                  iallocator=self.opts.iallocator,
      Log("run %s %s" % (mode, msg), indent=2)
      self.ExecOrQueue(instance, op)
566 def BurnFailover(self):
567 """Failover the instances."""
568 Log("Failing over instances")
569 for instance in self.instances:
570 Log("instance %s" % instance, indent=1)
571 op = opcodes.OpFailoverInstance(instance_name=instance,
572 ignore_consistency=False)
573 self.ExecOrQueue(instance, op)
    # NOTE(review): the enclosing `def BurnMove(self):` header, the second
    # izip iterable and the target_node keyword line of OpMoveInstance are in
    # elided lines.
    """Move the instances."""
    Log("Moving instances")
    # Pair each instance with the next node in the ring as move target.
    mytor = izip(islice(cycle(self.nodes), 1, None),
    for tnode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      op = opcodes.OpMoveInstance(instance_name=instance,
      self.ExecOrQueue(instance, op)
  # NOTE(review): the cleanup= keyword continuations of both
  # OpMigrateInstance calls (presumably cleanup=False then cleanup=True) are
  # in elided lines — TODO confirm.
  def BurnMigrate(self):
    """Migrate the instances."""
    Log("Migrating instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      op1 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
      op2 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
      Log("migration and migration cleanup", indent=2)
      self.ExecOrQueue(instance, op1, op2)
  # NOTE(review): the docstring close, the fourth izip iterable, the `else:`
  # branches of the log-message selection, the target_node keyword of
  # OpExportInstance, and many OpCreateInstance keyword lines (src_node,
  # src_path, pnode/snode, ...) are in elided lines.
  def BurnImportExport(self):
    """Export the instance, delete it, and import it back.

    Log("Exporting and re-importing instances")
    # (pnode, snode, enode) ring: primary, secondary and export node.
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
                 islice(cycle(self.nodes), 2, None),
    for pnode, snode, enode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      # read the full name of the instance
      nam_op = opcodes.OpQueryInstances(output_fields=["name"],
                                        names=[instance], use_locking=True)
      full_name = self.ExecOp(False, nam_op)[0][0]

      if self.opts.iallocator:
        import_log_msg = ("import from %s"
                          " with iallocator %s" %
                          (enode, self.opts.iallocator))
      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
        import_log_msg = ("import from %s to %s" %
        import_log_msg = ("import from %s to %s, %s" %
                          (enode, pnode, snode))

      exp_op = opcodes.OpExportInstance(instance_name=instance,
      rem_op = opcodes.OpRemoveInstance(instance_name=instance,
                                        ignore_failures=True)
      imp_dir = os.path.join(constants.EXPORT_DIR, full_name)
      imp_op = opcodes.OpCreateInstance(instance_name=instance,
                                        disks = [ {"size": size}
                                                  for size in self.disk_size],
                                        disk_template=self.opts.disk_template,
                                        mode=constants.INSTANCE_IMPORT,
                                        file_storage_dir=None,
                                        iallocator=self.opts.iallocator,
      erem_op = opcodes.OpRemoveExport(instance_name=instance)

      Log("export to node %s" % enode, indent=2)
      Log("remove instance", indent=2)
      Log(import_log_msg, indent=2)
      Log("remove export", indent=2)
      self.ExecOrQueue(instance, exp_op, rem_op, imp_op, erem_op)
668 def StopInstanceOp(self, instance):
669 """Stop given instance."""
670 return opcodes.OpShutdownInstance(instance_name=instance)
672 def StartInstanceOp(self, instance):
673 """Start given instance."""
674 return opcodes.OpStartupInstance(instance_name=instance, force=False)
676 def RenameInstanceOp(self, instance, instance_new):
677 """Rename instance."""
678 return opcodes.OpRenameInstance(instance_name=instance,
679 new_name=instance_new)
683 def BurnStopStart(self):
684 """Stop/start the instances."""
685 Log("Stopping and starting instances")
686 for instance in self.instances:
687 Log("instance %s" % instance, indent=1)
688 op1 = self.StopInstanceOp(instance)
689 op2 = self.StartInstanceOp(instance)
690 self.ExecOrQueue(instance, op1, op2)
693 def BurnRemove(self):
694 """Remove the instances."""
695 Log("Removing instances")
696 for instance in self.to_rem:
697 Log("instance %s" % instance, indent=1)
698 op = opcodes.OpRemoveInstance(instance_name=instance,
699 ignore_failures=True)
700 self.ExecOrQueue(instance, op)
  # NOTE(review): only the docstring's closing quotes are in elided lines —
  # the method body appears complete.
  def BurnRename(self):
    """Rename the instances.

    Note that this function will not execute in parallel, since we
    only have one target for rename.
    Log("Renaming instances")
    rename = self.opts.rename
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      op_stop1 = self.StopInstanceOp(instance)
      op_stop2 = self.StopInstanceOp(rename)
      op_rename1 = self.RenameInstanceOp(instance, rename)
      op_rename2 = self.RenameInstanceOp(rename, instance)
      op_start1 = self.StartInstanceOp(rename)
      op_start2 = self.StartInstanceOp(instance)
      # Rename to the spare name, check liveness, then rename back.
      self.ExecOp(False, op_stop1, op_rename1, op_start1)
      self._CheckInstanceAlive(rename)
      self.ExecOp(False, op_stop2, op_rename2, op_start2)
      self._CheckInstanceAlive(instance)
726 def BurnReinstall(self):
727 """Reinstall the instances."""
728 Log("Reinstalling instances")
729 for instance in self.instances:
730 Log("instance %s" % instance, indent=1)
731 op1 = self.StopInstanceOp(instance)
732 op2 = opcodes.OpReinstallInstance(instance_name=instance)
733 Log("reinstall without passing the OS", indent=2)
734 op3 = opcodes.OpReinstallInstance(instance_name=instance,
735 os_type=self.opts.os)
736 Log("reinstall specifying the OS", indent=2)
737 op4 = self.StartInstanceOp(instance)
738 self.ExecOrQueue(instance, op1, op2, op3, op4)
  # NOTE(review): the `ops = []` initialisation and the `ops.append(op)` are
  # in elided lines — one reboot opcode per reboot type is accumulated and
  # submitted together.
  def BurnReboot(self):
    """Reboot the instances."""
    Log("Rebooting instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      for reboot_type in constants.REBOOT_TYPES:
        op = opcodes.OpRebootInstance(instance_name=instance,
                                      reboot_type=reboot_type,
                                      ignore_secondaries=False)
        Log("reboot with type '%s'" % reboot_type, indent=2)
      self.ExecOrQueue(instance, *ops)
758 def BurnActivateDisks(self):
759 """Activate and deactivate disks of the instances."""
760 Log("Activating/deactivating disks")
761 for instance in self.instances:
762 Log("instance %s" % instance, indent=1)
763 op_start = self.StartInstanceOp(instance)
764 op_act = opcodes.OpActivateInstanceDisks(instance_name=instance)
765 op_deact = opcodes.OpDeactivateInstanceDisks(instance_name=instance)
766 op_stop = self.StopInstanceOp(instance)
767 Log("activate disks when online", indent=2)
768 Log("activate disks when offline", indent=2)
769 Log("deactivate disks (when offline)", indent=2)
770 self.ExecOrQueue(instance, op_act, op_stop, op_act, op_deact, op_start)
774 def BurnAddRemoveDisks(self):
775 """Add and remove an extra disk for the instances."""
776 Log("Adding and removing disks")
777 for instance in self.instances:
778 Log("instance %s" % instance, indent=1)
779 op_add = opcodes.OpSetInstanceParams(\
780 instance_name=instance,
781 disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
782 op_rem = opcodes.OpSetInstanceParams(\
783 instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
784 op_stop = self.StopInstanceOp(instance)
785 op_start = self.StartInstanceOp(instance)
786 Log("adding a disk", indent=2)
787 Log("removing last disk", indent=2)
788 self.ExecOrQueue(instance, op_add, op_stop, op_rem, op_start)
791 def BurnAddRemoveNICs(self):
792 """Add and remove an extra NIC for the instances."""
793 Log("Adding and removing NICs")
794 for instance in self.instances:
795 Log("instance %s" % instance, indent=1)
796 op_add = opcodes.OpSetInstanceParams(\
797 instance_name=instance, nics=[(constants.DDM_ADD, {})])
798 op_rem = opcodes.OpSetInstanceParams(\
799 instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
800 Log("adding a NIC", indent=2)
801 Log("removing last NIC", indent=2)
802 self.ExecOrQueue(instance, op_add, op_rem)
  # NOTE(review): the docstring close, the early `return`, the `url = None`
  # initialisation, the `try:` around url_opener.open and the except handler
  # (presumably catching IOError/ECONNREFUSED with a retry sleep), and the
  # `if url is None:` before the "Cannot contact" raise are in elided lines.
  def _CheckInstanceAlive(self, instance):
    """Check if an instance is alive by doing http checks.

    This will try to retrieve the url on the instance /hostname.txt
    and check that it contains the hostname of the instance. In case
    we get ECONNREFUSED, we retry up to the net timeout seconds, for
    any other error we abort.
    if not self.opts.http_check:
    end_time = time.time() + self.opts.net_timeout
    while time.time() < end_time and url is None:
        url = self.url_opener.open("http://%s/hostname.txt" % instance)
        # here we can have connection refused, no route to host, etc.
      raise InstanceDown(instance, "Cannot contact instance")
    hostname = url.read().strip()
    if hostname != instance:
      raise InstanceDown(instance, ("Hostname mismatch, expected %s, got %s" %
                                    (instance, hostname)))
  # NOTE(review): large parts of this driver are in elided lines: the
  # docstring close, the `opts = self.opts` alias, the ParseOptions/GetState
  # calls, the outer `try:`/`has_err` bookkeeping, the closing parentheses of
  # several multi-line conditions, the BurnGrowDisks/BurnFailover/
  # BurnMigrate/BurnMove/BurnReinstall/BurnReboot/BurnRename/BurnStopStart
  # call sites, the inner `try:` around BurnRemove and the final return.
  def BurninCluster(self):
    """Test a cluster intensively.

    This will create instances and then start/stop/failover them.
    It is safe for existing instances but could impact performance.

    Log("Testing global parameters")

    if (len(self.nodes) == 1 and
        opts.disk_template not in (constants.DT_DISKLESS, constants.DT_PLAIN,
      Err("When one node is available/selected the disk template must"
          " be 'diskless', 'file' or 'plain'")

      self.BurnCreateInstances()

      if opts.do_replace1 and opts.disk_template in constants.DTS_NET_MIRROR:
        self.BurnReplaceDisks1D8()
      if (opts.do_replace2 and len(self.nodes) > 2 and
          opts.disk_template in constants.DTS_NET_MIRROR) :
        self.BurnReplaceDisks2()

      if (opts.disk_template != constants.DT_DISKLESS and
          utils.any(self.disk_growth, lambda n: n > 0)):

      if opts.do_failover and opts.disk_template in constants.DTS_NET_MIRROR:

      if opts.do_migrate and opts.disk_template == constants.DT_DRBD8:

      if (opts.do_move and len(self.nodes) > 1 and
          opts.disk_template in [constants.DT_PLAIN, constants.DT_FILE]):

      if (opts.do_importexport and
          opts.disk_template not in (constants.DT_DISKLESS,
        self.BurnImportExport()

      if opts.do_reinstall:

      if opts.do_addremove_disks:
        self.BurnAddRemoveDisks()

      if opts.do_addremove_nics:
        self.BurnAddRemoveNICs()

      if opts.do_activate_disks:
        self.BurnActivateDisks()

      if opts.do_startstop:

        # On failure, dump the accumulated opcode feedback for debugging.
        Log("Error detected: opcode buffer follows:\n\n")
        Log(self.GetFeedbackBuf())

      if not self.opts.keep_instances:
        except Exception, err:
          if has_err: # already detected errors, so errors in removal
            Log("Note: error detected during instance remove: %s" % str(err))
          else: # non-expected error
  # NOTE(review): the `def main():` header, the Burner() construction and the
  # `sys.exit(main())` under the guard are in elided lines — only the tail of
  # main() and the entry guard are visible.
  return burner.BurninCluster()

if __name__ == "__main__":