root / tools / burnin @ 99bdd139

#!/usr/bin/python
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Burnin program

"""

import os
import sys
import optparse
import time
import socket
import urllib2
import errno
from itertools import izip, islice, cycle
from cStringIO import StringIO

from ganeti import opcodes
from ganeti import mcpu
from ganeti import constants
from ganeti import cli
from ganeti import errors
from ganeti import utils


USAGE = ("\tburnin -o OS_NAME [options...] instance_name ...")
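# Example invocation (OS, node and instance names below are hypothetical):
#   burnin -o debootstrap -t drbd -n node1,node2 instance1.example.com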


class InstanceDown(Exception):
  """The checked instance was not up"""


def Usage():
  """Shows program usage information and exits the program."""

  print >> sys.stderr, "Usage:"
  print >> sys.stderr, USAGE
  sys.exit(2)


def Log(msg, indent=0):
  """Simple function that prints out its argument.

  """
  headers = {
    0: "- ",
    1: "* ",
    2: ""
    }
  sys.stdout.write("%*s%s%s\n" % (2*indent, "",
                                   headers.get(indent, "  "), msg))
  sys.stdout.flush()

def Err(msg, exit_code=1):
  """Simple error logging that prints to stderr.

  """
  sys.stderr.write(msg + "\n")
  sys.stderr.flush()
  sys.exit(exit_code)

class Burner(object):
  """Burner class."""

  def __init__(self):
    """Constructor."""
    utils.SetupLogging(constants.LOG_BURNIN, debug=False, stderr_logging=True)
    self._feed_buf = StringIO()
    self.nodes = []
    self.instances = []
    self.to_rem = []
    self.opts = None
    self.cl = cli.GetClient()
    self.ParseOptions()
    self.GetState()

  def ClearFeedbackBuf(self):
    """Clear the feedback buffer."""
    self._feed_buf.truncate(0)

  def GetFeedbackBuf(self):
    """Return the contents of the buffer."""
    return self._feed_buf.getvalue()

  def Feedback(self, msg):
    """Accumulate feedback in our buffer."""
    self._feed_buf.write("%s %s\n" % (time.ctime(utils.MergeTime(msg[0])),
                                      msg[2]))
    if self.opts.verbose:
      Log(msg, indent=3)

  def ExecOp(self, op):
    """Execute an opcode and manage the exec buffer."""
    self.ClearFeedbackBuf()
    return cli.SubmitOpCode(op, feedback_fn=self.Feedback, cl=self.cl)

  def ExecJobSet(self, jobs):
    """Execute a set of jobs and return once all are done.

    The method will return the list of results if all jobs are
    successful. Otherwise, OpExecError will be raised from within
    cli.py.

    """
    self.ClearFeedbackBuf()
    job_ids = [cli.SendJob(job, cl=self.cl) for job in jobs]
    Log("Submitted job IDs %s" % ", ".join(job_ids), indent=1)
    results = []
    for jid in job_ids:
      Log("Waiting for job %s" % jid, indent=2)
      results.append(cli.PollJob(jid, cl=self.cl, feedback_fn=self.Feedback))

    return results

  def ParseOptions(self):
    """Parses the command line options.

    In case of command line errors, it will show the usage and exit the
    program.

    """

    parser = optparse.OptionParser(usage="\n%s" % USAGE,
                                   version="%%prog (ganeti) %s" %
                                   constants.RELEASE_VERSION,
                                   option_class=cli.CliOption)

    parser.add_option("-o", "--os", dest="os", default=None,
                      help="OS to use during burnin",
                      metavar="<OS>")
    parser.add_option("--disk-size", dest="disk_size",
                      help="Disk size (determines disk count)",
                      default="128m", type="string", metavar="<size,size,...>")
    parser.add_option("--disk-growth", dest="disk_growth", help="Disk growth",
                      default="128m", type="string", metavar="<size,size,...>")
    parser.add_option("--mem-size", dest="mem_size", help="Memory size",
                      default=128, type="unit", metavar="<size>")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="print command execution messages to stdout")
    parser.add_option("--no-replace1", dest="do_replace1",
                      help="Skip disk replacement with the same secondary",
                      action="store_false", default=True)
    parser.add_option("--no-replace2", dest="do_replace2",
                      help="Skip disk replacement with a different secondary",
                      action="store_false", default=True)
    parser.add_option("--no-failover", dest="do_failover",
                      help="Skip instance failovers", action="store_false",
                      default=True)
    parser.add_option("--no-migrate", dest="do_migrate",
                      help="Skip instance live migration",
                      action="store_false", default=True)
    parser.add_option("--no-importexport", dest="do_importexport",
                      help="Skip instance export/import", action="store_false",
                      default=True)
    parser.add_option("--no-startstop", dest="do_startstop",
                      help="Skip instance stop/start", action="store_false",
                      default=True)
    parser.add_option("--no-reinstall", dest="do_reinstall",
                      help="Skip instance reinstall", action="store_false",
                      default=True)
    parser.add_option("--no-reboot", dest="do_reboot",
                      help="Skip instance reboot", action="store_false",
                      default=True)
    parser.add_option("--no-activate-disks", dest="do_activate_disks",
                      help="Skip disk activation/deactivation",
                      action="store_false", default=True)
    parser.add_option("--no-add-disks", dest="do_addremove_disks",
                      help="Skip disk addition/removal",
                      action="store_false", default=True)
    parser.add_option("--no-add-nics", dest="do_addremove_nics",
                      help="Skip NIC addition/removal",
                      action="store_false", default=True)
    parser.add_option("--no-nics", dest="nics",
                      help="No network interfaces", action="store_const",
                      const=[], default=[{}])
    parser.add_option("--rename", dest="rename", default=None,
                      help="Give one unused instance name which is taken"
                           " to start the renaming sequence",
                      metavar="<instance_name>")
    parser.add_option("-t", "--disk-template", dest="disk_template",
                      choices=("diskless", "file", "plain", "drbd"),
                      default="drbd",
                      help="Disk template (diskless, file, plain or drbd)"
                            " [drbd]")
    parser.add_option("-n", "--nodes", dest="nodes", default="",
                      help="Comma separated list of nodes to perform"
                      " the burnin on (defaults to all nodes)")
    parser.add_option("--iallocator", dest="iallocator",
                      default=None, type="string",
                      help="Perform the allocation using an iallocator"
                      " instead of fixed node spread (node restrictions no"
                      " longer apply, therefore -n/--nodes must not be used)")
    parser.add_option("-p", "--parallel", default=False, action="store_true",
                      dest="parallel",
                      help="Enable parallelization of some operations in"
                      " order to speed up the burnin or to test granular"
                      " locking")
    parser.add_option("--net-timeout", default=15, type="int",
                      dest="net_timeout",
                      help="The instance check network timeout in seconds"
                      " (defaults to 15 seconds)")
    parser.add_option("-C", "--http-check", default=False, action="store_true",
                      dest="http_check",
                      help="Enable checking of instance status via http,"
                      " looking for /hostname.txt that should contain the"
                      " name of the instance")


    options, args = parser.parse_args()
    if len(args) < 1 or options.os is None:
      Usage()

    supported_disk_templates = (constants.DT_DISKLESS,
                                constants.DT_FILE,
                                constants.DT_PLAIN,
                                constants.DT_DRBD8)
    if options.disk_template not in supported_disk_templates:
      Err("Unknown disk template '%s'" % options.disk_template)

    if options.disk_template == constants.DT_DISKLESS:
      disk_size = disk_growth = []
      options.do_addremove_disks = False
    else:
      disk_size = [utils.ParseUnit(v) for v in options.disk_size.split(",")]
      disk_growth = [utils.ParseUnit(v)
                     for v in options.disk_growth.split(",")]
      if len(disk_growth) != len(disk_size):
        Err("Wrong disk sizes/growth combination")
    if ((disk_size and options.disk_template == constants.DT_DISKLESS) or
        (not disk_size and options.disk_template != constants.DT_DISKLESS)):
      Err("Wrong disk count/disk template combination")

    self.disk_size = disk_size
    self.disk_growth = disk_growth
    self.disk_count = len(disk_size)

    if options.nodes and options.iallocator:
      Err("Give either the nodes option or the iallocator option, not both")

    self.opts = options
    self.instances = args
    self.bep = {
      constants.BE_MEMORY: options.mem_size,
      constants.BE_VCPUS: 1,
      }
    self.hvp = {}

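    # the default socket timeout set below also bounds the urllib2
    # requests issued by _CheckInstanceAlive, so the http checks cannot
    # hang indefinitely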
    socket.setdefaulttimeout(options.net_timeout)

  def GetState(self):
    """Read the cluster state from the config."""
    if self.opts.nodes:
      names = self.opts.nodes.split(",")
    else:
      names = []
    try:
      op = opcodes.OpQueryNodes(output_fields=["name", "offline"], names=names)
      result = self.ExecOp(op)
    except errors.GenericError, err:
      err_code, msg = cli.FormatError(err)
      Err(msg, exit_code=err_code)
    self.nodes = [data[0] for data in result if not data[1]]

    result = self.ExecOp(opcodes.OpDiagnoseOS(output_fields=["name", "valid"],
                                              names=[]))

    if not result:
      Err("Can't get the OS list")

    # filter out the invalid OSes
    os_set = [val[0] for val in result if val[1]]

    if self.opts.os not in os_set:
      Err("OS '%s' not found" % self.opts.os)

  def CreateInstances(self):
    """Create the given instances.

    """
    self.to_rem = []
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
                 self.instances)
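    # cycle/islice walk the node list as a ring: each instance is paired
    # with one node as primary and the following node as secondary,
    # spreading the instances evenly across the cluster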
    jobset = []

    Log("Creating instances")
    for pnode, snode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      if self.opts.iallocator:
        pnode = snode = None
        msg = "with iallocator %s" % self.opts.iallocator
      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
        snode = None
        msg = "on %s" % pnode
      else:
        msg = "on %s, %s" % (pnode, snode)

      Log(msg, indent=2)

      op = opcodes.OpCreateInstance(instance_name=instance,
                                    disks = [ {"size": size}
                                              for size in self.disk_size],
                                    disk_template=self.opts.disk_template,
                                    nics=self.opts.nics,
                                    mode=constants.INSTANCE_CREATE,
                                    os_type=self.opts.os,
                                    pnode=pnode,
                                    snode=snode,
                                    start=True,
                                    ip_check=True,
                                    wait_for_sync=True,
                                    file_driver="loop",
                                    file_storage_dir=None,
                                    iallocator=self.opts.iallocator,
                                    beparams=self.bep,
                                    hvparams=self.hvp,
                                    )

      if self.opts.parallel:
        jobset.append([op])
        # FIXME: here we should not append to to_rem unconditionally,
        # but only when the job is successful
        self.to_rem.append(instance)
      else:
        self.ExecOp(op)
        self.to_rem.append(instance)
    if self.opts.parallel:
      self.ExecJobSet(jobset)

    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def GrowDisks(self):
    """Grow the instance disks by the requested amount, if any."""
    Log("Growing disks")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      for idx, growth in enumerate(self.disk_growth):
        if growth > 0:
          op = opcodes.OpGrowDisk(instance_name=instance, disk=idx,
                                  amount=growth, wait_for_sync=True)
          Log("increase disk/%s by %s MB" % (idx, growth), indent=2)
          self.ExecOp(op)

  def ReplaceDisks1D8(self):
    """Replace disks on primary and secondary for drbd8."""
    Log("Replacing disks on the same nodes")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI:
        op = opcodes.OpReplaceDisks(instance_name=instance,
                                    mode=mode,
                                    disks=[i for i in range(self.disk_count)])
        Log("run %s" % mode, indent=2)
        self.ExecOp(op)

  def ReplaceDisks2(self):
    """Replace secondary node."""
    Log("Changing the secondary node")
    mode = constants.REPLACE_DISK_CHG

    mytor = izip(islice(cycle(self.nodes), 2, None),
                 self.instances)
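    # start two positions into the node ring so that (with more than two
    # nodes) the proposed new secondary differs from the nodes chosen at
    # instance creation time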
    for tnode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      if self.opts.iallocator:
        tnode = None
        msg = "with iallocator %s" % self.opts.iallocator
      else:
        msg = tnode
      op = opcodes.OpReplaceDisks(instance_name=instance,
                                  mode=mode,
                                  remote_node=tnode,
                                  iallocator=self.opts.iallocator,
                                  disks=[i for i in range(self.disk_count)])
      Log("run %s %s" % (mode, msg), indent=2)
      self.ExecOp(op)

  def Failover(self):
    """Failover the instances."""
    Log("Failing over instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      op = opcodes.OpFailoverInstance(instance_name=instance,
                                      ignore_consistency=False)

      self.ExecOp(op)
    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def Migrate(self):
    """Migrate the instances."""

    for instance in self.instances:
      op = opcodes.OpMigrateInstance(instance_name=instance, live=True,
                                     cleanup=False)

      Log("Migrate instance %s" % instance)
      self.ExecOp(op)
    for instance in self.instances:
      op = opcodes.OpMigrateInstance(instance_name=instance, live=True,
                                     cleanup=True)

      Log("Testing 'migrate --cleanup' for instance %s" % instance)
      self.ExecOp(op)

  def ImportExport(self):
    """Export the instance, delete it, and import it back.

    """
    Log("Exporting and re-importing instances")
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
                 islice(cycle(self.nodes), 2, None),
                 self.instances)
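    # each instance gets three consecutive nodes from the ring: a primary,
    # a secondary and a third node used as the export target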

    for pnode, snode, enode, instance in mytor:
      Log("instance %s" % instance, indent=1)
      if self.opts.iallocator:
        pnode = snode = None
        import_log_msg = ("import from %s"
                          " with iallocator %s" %
                          (enode, self.opts.iallocator))
      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
        snode = None
        import_log_msg = ("import from %s to %s" %
                          (enode, pnode))
      else:
        import_log_msg = ("import from %s to %s, %s" %
                          (enode, pnode, snode))

      exp_op = opcodes.OpExportInstance(instance_name=instance,
                                        target_node=enode,
                                        shutdown=True)
      rem_op = opcodes.OpRemoveInstance(instance_name=instance,
                                        ignore_failures=True)
      nam_op = opcodes.OpQueryInstances(output_fields=["name"],
                                        names=[instance])
      full_name = self.ExecOp(nam_op)[0][0]
      imp_dir = os.path.join(constants.EXPORT_DIR, full_name)
      imp_op = opcodes.OpCreateInstance(instance_name=instance,
                                        disks = [ {"size": size}
                                                  for size in self.disk_size],
                                        disk_template=self.opts.disk_template,
                                        nics=self.opts.nics,
                                        mode=constants.INSTANCE_IMPORT,
                                        src_node=enode,
                                        src_path=imp_dir,
                                        pnode=pnode,
                                        snode=snode,
                                        start=True,
                                        ip_check=True,
                                        wait_for_sync=True,
                                        file_storage_dir=None,
                                        file_driver="loop",
                                        iallocator=self.opts.iallocator,
                                        beparams=self.bep,
                                        hvparams=self.hvp,
                                        )

      erem_op = opcodes.OpRemoveExport(instance_name=instance)

      Log("export to node %s" % enode, indent=2)
      self.ExecOp(exp_op)
      Log("remove instance", indent=2)
      self.ExecOp(rem_op)
      self.to_rem.remove(instance)
      Log(import_log_msg, indent=2)
      self.ExecOp(imp_op)
      Log("remove export", indent=2)
      self.ExecOp(erem_op)

      self.to_rem.append(instance)

    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def StopInstance(self, instance):
    """Stop given instance."""
    op = opcodes.OpShutdownInstance(instance_name=instance)
    Log("shutdown", indent=2)
    self.ExecOp(op)

  def StartInstance(self, instance):
    """Start given instance."""
    op = opcodes.OpStartupInstance(instance_name=instance, force=False)
    Log("startup", indent=2)
    self.ExecOp(op)

  def RenameInstance(self, instance, instance_new):
    """Rename instance."""
    op = opcodes.OpRenameInstance(instance_name=instance,
                                  new_name=instance_new)
    Log("rename to %s" % instance_new, indent=2)
    self.ExecOp(op)

  def StopStart(self):
    """Stop/start the instances."""
    Log("Stopping and starting instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      self.StopInstance(instance)
      self.StartInstance(instance)

    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def Remove(self):
    """Remove the instances."""
    Log("Removing instances")
    for instance in self.to_rem:
      Log("instance %s" % instance, indent=1)
      op = opcodes.OpRemoveInstance(instance_name=instance,
                                    ignore_failures=True)
      self.ExecOp(op)

  def Rename(self):
    """Rename the instances."""
    Log("Renaming instances")
    rename = self.opts.rename
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      self.StopInstance(instance)
      self.RenameInstance(instance, rename)
      self.StartInstance(rename)
      self._CheckInstanceAlive(rename)
      self.StopInstance(rename)
      self.RenameInstance(rename, instance)
      self.StartInstance(instance)

    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def Reinstall(self):
    """Reinstall the instances."""
    Log("Reinstalling instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      self.StopInstance(instance)
      op = opcodes.OpReinstallInstance(instance_name=instance)
      Log("reinstall without passing the OS", indent=2)
      self.ExecOp(op)
      op = opcodes.OpReinstallInstance(instance_name=instance,
                                       os_type=self.opts.os)
      Log("reinstall specifying the OS", indent=2)
      self.ExecOp(op)
      self.StartInstance(instance)
    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def Reboot(self):
    """Reboot the instances."""
    Log("Rebooting instances")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      for reboot_type in constants.REBOOT_TYPES:
        op = opcodes.OpRebootInstance(instance_name=instance,
                                      reboot_type=reboot_type,
                                      ignore_secondaries=False)
        Log("reboot with type '%s'" % reboot_type, indent=2)
        self.ExecOp(op)
        self._CheckInstanceAlive(instance)

  def ActivateDisks(self):
    """Activate and deactivate disks of the instances."""
    Log("Activating/deactivating disks")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      op_act = opcodes.OpActivateInstanceDisks(instance_name=instance)
      op_deact = opcodes.OpDeactivateInstanceDisks(instance_name=instance)
      Log("activate disks when online", indent=2)
      self.ExecOp(op_act)
      self.StopInstance(instance)
      Log("activate disks when offline", indent=2)
      self.ExecOp(op_act)
      Log("deactivate disks (when offline)", indent=2)
      self.ExecOp(op_deact)
      self.StartInstance(instance)
    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def AddRemoveDisks(self):
    """Add and remove an extra disk for the instances."""
    Log("Adding and removing disks")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      op_add = opcodes.OpSetInstanceParams(\
        instance_name=instance,
        disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
      op_rem = opcodes.OpSetInstanceParams(\
        instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
      Log("adding a disk", indent=2)
      self.ExecOp(op_add)
      self.StopInstance(instance)
      Log("removing last disk", indent=2)
      self.ExecOp(op_rem)
      self.StartInstance(instance)
    for instance in self.instances:
      self._CheckInstanceAlive(instance)

  def AddRemoveNICs(self):
    """Add and remove an extra NIC for the instances."""
    Log("Adding and removing NICs")
    for instance in self.instances:
      Log("instance %s" % instance, indent=1)
      op_add = opcodes.OpSetInstanceParams(\
        instance_name=instance, nics=[(constants.DDM_ADD, {})])
      op_rem = opcodes.OpSetInstanceParams(\
        instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
      Log("adding a NIC", indent=2)
      self.ExecOp(op_add)
      Log("removing last NIC", indent=2)
      self.ExecOp(op_rem)

  def _CheckInstanceAlive(self, instance):
    """Check if an instance is alive by doing http checks.

    This will try to retrieve the URL http://<instance>/hostname.txt
    and check that it contains the hostname of the instance. If we get
    ECONNREFUSED, we retry for up to the network timeout (in seconds);
    on any other error we abort.

    """
    if not self.opts.http_check:
      return
    try:
      for retries in range(self.opts.net_timeout):
        try:
          url = urllib2.urlopen("http://%s/hostname.txt" % instance)
        except urllib2.URLError, err:
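          # ECONNREFUSED most likely means the instance is still booting,
          # so sleep and retry; any other error is re-raised and reported
          # as the instance being down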
          if err.args[0][0] == errno.ECONNREFUSED:
            time.sleep(1)
            continue
          raise
    except urllib2.URLError, err:
      raise InstanceDown(instance, str(err))
    hostname = url.read().strip()
    if hostname != instance:
      raise InstanceDown(instance, ("Hostname mismatch, expected %s, got %s" %
                                    (instance, hostname)))

  def BurninCluster(self):
    """Test a cluster intensively.

    This will create instances and then start/stop/failover them.
    It is safe for existing instances but could impact performance.

    """

    opts = self.opts

    Log("Testing global parameters")

    if (len(self.nodes) == 1 and
        opts.disk_template not in (constants.DT_DISKLESS, constants.DT_PLAIN,
                                   constants.DT_FILE)):
      Err("When one node is available/selected the disk template must"
          " be 'diskless', 'file' or 'plain'")

    has_err = True
    try:
      self.CreateInstances()
      if opts.do_replace1 and opts.disk_template in constants.DTS_NET_MIRROR:
        self.ReplaceDisks1D8()
      if (opts.do_replace2 and len(self.nodes) > 2 and
          opts.disk_template in constants.DTS_NET_MIRROR):
        self.ReplaceDisks2()

      if opts.disk_template != constants.DT_DISKLESS:
        self.GrowDisks()

      if opts.do_failover and opts.disk_template in constants.DTS_NET_MIRROR:
        self.Failover()

      if opts.do_migrate and opts.disk_template == constants.DT_DRBD8:
        self.Migrate()

      if (opts.do_importexport and
          opts.disk_template not in (constants.DT_DISKLESS,
                                     constants.DT_FILE)):
        self.ImportExport()

      if opts.do_reinstall:
        self.Reinstall()

      if opts.do_reboot:
        self.Reboot()

      if opts.do_addremove_disks:
        self.AddRemoveDisks()

      if opts.do_addremove_nics:
        self.AddRemoveNICs()

      if opts.do_activate_disks:
        self.ActivateDisks()

      if opts.rename:
        self.Rename()

      if opts.do_startstop:
        self.StopStart()

      has_err = False
    finally:
      if has_err:
        Log("Error detected: opcode buffer follows:\n\n")
        Log(self.GetFeedbackBuf())
        Log("\n\n")
      self.Remove()

    return 0


def main():
  """Main function"""

  burner = Burner()
  return burner.BurninCluster()


if __name__ == "__main__":
  sys.exit(main())