Statistics
| Branch: | Tag: | Revision:

root / testing / ganeti.qa.py @ 53b78ba4

History | View | Annotate | Download (18.2 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Script for doing Q&A on Ganeti"""
23

    
24
import os
import re
import stat
import sys
import time

# I want more flexibility for testing over SSH, therefore I'm not using
# Ganeti's ssh module.
import subprocess

from datetime import datetime
from optparse import OptionParser

import yaml

from ganeti import utils
from ganeti import constants
39

    
40
# {{{ Global variables
41
cfg = None
42
options = None
43
# }}}
44

    
45
# {{{ Errors
class Error(Exception):
  """An error occurred during Q&A testing.

  Base class for all errors raised by this script.

  """
  pass
51

    
52

    
53
class OutOfNodesError(Error):
  """Out of nodes.

  Raised by AcquireNode when no suitable node is available.

  """
  pass
58

    
59

    
60
class OutOfInstancesError(Error):
  """Out of instances.

  Raised by AcquireInstance when no unused instance is left.

  """
  pass
# }}}
66

    
67
# {{{ Utilities
def TestEnabled(test):
  """Returns True if the given test is enabled."""
  # Tests not mentioned in the configuration default to disabled.
  tests = cfg.get('tests', {})
  return tests.get(test, False)
71

    
72

    
73
def RunTest(fn, *args):
  """Runs a test after printing a header.

  Args:
    fn: Test function to run; the first line of its docstring is used as
      the description in the header (falls back to repr(fn)).
    args: Positional arguments passed through to fn.

  Returns:
    Whatever fn returns.

  """
  # Note: the parameter was renamed from "callable", which shadowed the
  # builtin of the same name.
  if fn.__doc__:
    desc = fn.__doc__.splitlines()[0].strip()
  else:
    desc = '%r' % fn

  now = str(datetime.now())

  # Print a 60-column header: blank line, timestamp padded with dashes,
  # the description and a final ruler line.
  sys.stdout.write('\n')
  sys.stdout.write('--- %s %s\n' % (now, '-' * (55 - len(now))))
  sys.stdout.write('%s\n' % desc)
  sys.stdout.write('%s\n' % ('-' * 60))

  return fn(*args)
90

    
91

    
92
def AssertEqual(first, second, msg=None):
  """Raises an error when values aren't equal.

  Args:
    first: First value to compare
    second: Second value to compare
    msg: Custom error message; defaults to showing both values

  Raises:
    Error: if the two values are not equal

  """
  if first != second:
    raise Error(msg or '%r == %r' % (first, second))
98

    
99

    
100
def GetSSHCommand(node, cmd, strict=True):
101
  """Builds SSH command to be executed.
102

103
  """
104
  args = [ 'ssh', '-oEscapeChar=none', '-oBatchMode=yes', '-l', 'root' ]
105

    
106
  if strict:
107
    tmp = 'yes'
108
  else:
109
    tmp = 'no'
110
  args.append('-oStrictHostKeyChecking=%s' % tmp)
111
  args.append(node)
112

    
113
  if options.dry_run:
114
    prefix = 'exit 0; '
115
  else:
116
    prefix = ''
117

    
118
  args.append(prefix + cmd)
119

    
120
  if options.verbose:
121
    print 'SSH:', utils.ShellQuoteArgs(args)
122

    
123
  return args
124

    
125

    
126
def StartSSH(node, cmd, strict=True):
  """Starts SSH.

  Returns the subprocess.Popen object for the ssh process running "cmd"
  on "node".

  """
  return subprocess.Popen(GetSSHCommand(node, cmd, strict=strict),
                          shell=False)
132

    
133

    
134
def UploadFile(node, filename):
  """Uploads a file to a node and returns the filename.

  Caller needs to remove the remote file when it's not needed anymore.

  Args:
    node: Node to upload to (passed to ssh)
    filename: Local path of the file to upload
      (renamed from "file", which shadowed the builtin)

  Returns:
    Path of the temporary file created on the node.
  """
  # Preserve the owner-execute bit of the local file on the remote copy
  if os.stat(filename).st_mode & stat.S_IXUSR:
    mode = '0700'
  else:
    mode = '0600'

  # Create a remote temporary file with the right mode, fill it from our
  # stdin and print its path so we can read it back.
  cmd = ('tmp=$(tempfile --mode %s --prefix gnt) && '
         '[[ -f "${tmp}" ]] && '
         'cat > "${tmp}" && '
         'echo "${tmp}"') % mode

  f = open(filename, 'r')
  try:
    p = subprocess.Popen(GetSSHCommand(node, cmd), shell=False, stdin=f,
                         stdout=subprocess.PIPE)
    AssertEqual(p.wait(), 0)

    # The remote command printed the temporary file's path on stdout
    return p.stdout.read().strip()
  finally:
    f.close()
# }}}
161

    
162
# {{{ Config helpers
def GetMasterNode():
  """Returns the master node, which is the first node in the config."""
  return cfg['nodes'][0]
165

    
166

    
167
def AcquireInstance():
  """Returns an instance which isn't in use.

  Marks the returned instance as used; release with ReleaseInstance.

  Raises:
    OutOfInstancesError: if all configured instances are in use
  """
  # Skip instances already marked as used
  unused = [entry for entry in cfg['instances']
            if not entry.get('_used', False)]

  if not unused:
    raise OutOfInstancesError("No instances left")

  instance = unused[0]
  instance['_used'] = True
  return instance
182

    
183

    
184
def ReleaseInstance(inst):
  # Mark the instance as free again so AcquireInstance may return it
  inst['_used'] = False
186

    
187

    
188
def AcquireNode(exclude=None):
  """Returns the least used node.

  Args:
    exclude: Node entry that must not be returned, or None

  Raises:
    OutOfNodesError: if no usable node is left
  """
  master = GetMasterNode()

  # Filter out unwanted nodes
  # TODO: Maybe combine filters
  if exclude is None:
    candidates = cfg['nodes'][:]
  else:
    candidates = [entry for entry in cfg['nodes'] if entry != exclude]

  # Only nodes added to the cluster (or the master itself) are usable
  candidates = [entry for entry in candidates
                if entry.get('_added', False) or entry == master]

  if not candidates:
    raise OutOfNodesError("No nodes left")

  # Pick the node with the lowest usage count, breaking ties by primary
  # name (same ordering as the former cmp-based sort)
  candidates.sort(key=lambda entry: (entry.get('_count', 0),
                                     entry['primary']))

  node = candidates[0]
  node['_count'] = node.get('_count', 0) + 1
  return node
220

    
221

    
222
def ReleaseNode(node):
  # Decrease the usage count so AcquireNode prefers this node again
  node['_count'] = node.get('_count', 0) - 1
# }}}
225

    
226
# {{{ Environment tests
def TestConfig():
  """Test configuration for sanity.

  Raises:
    Error: if the configuration lacks nodes or instances
  """
  if not cfg['nodes']:
    raise Error("Need at least one node")
  if not cfg['instances']:
    raise Error("Need at least one instance")
  # TODO: Add more checks
236

    
237

    
238
def TestSshConnection():
  """Test SSH connection.

  Every node must be reachable via passwordless SSH as root.

  """
  for node in cfg['nodes']:
    ret = StartSSH(node['primary'], 'exit').wait()
    AssertEqual(ret, 0)
244

    
245

    
246
def TestGanetiCommands():
  """Test availibility of Ganeti commands.

  Runs every Ganeti program with --version on every node.

  """
  programs = ('gnt-cluster', 'gnt-os', 'gnt-node', 'gnt-instance',
              'gnt-backup', 'ganeti-noded', 'ganeti-watcher')

  # Chain all version checks into a single remote command
  quoted = [utils.ShellQuoteArgs([prog, '--version']) for prog in programs]
  cmd = ' && '.join(quoted)

  for node in cfg['nodes']:
    AssertEqual(StartSSH(node['primary'], cmd).wait(), 0)
262

    
263

    
264
def TestIcmpPing():
  """ICMP ping each node.

  Every node pings the primary (and, if set, secondary) address of every
  node, including itself.

  """
  # The list of addresses to ping is identical for every node, so build
  # it (and the remote command) once instead of per node.
  check = []
  for i in cfg['nodes']:
    check.append(i['primary'])
    if 'secondary' in i:
      check.append(i['secondary'])

  ping = lambda ip: utils.ShellQuoteArgs(['ping', '-w', '3', '-c', '1', ip])
  cmd = ' && '.join([ping(i) for i in check])

  for node in cfg['nodes']:
    AssertEqual(StartSSH(node['primary'], cmd).wait(), 0)
# }}}
280

    
281
# {{{ Cluster tests
def TestClusterInit():
  """gnt-cluster init"""
  master = GetMasterNode()

  cmd = ['gnt-cluster', 'init']

  # Pass the master's secondary IP if one is configured
  if master.get('secondary', None):
    cmd.append('--secondary-ip=%s' % master['secondary'])

  # Use the configured bridge for instances and the master network device
  bridge = cfg.get('bridge', None)
  if bridge:
    cmd.append('--bridge=%s' % bridge)
    cmd.append('--master-netdev=%s' % bridge)

  cmd.append(cfg['name'])

  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)
296

    
297

    
298
def TestClusterVerify():
  """gnt-cluster verify"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-cluster', 'verify'])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
303

    
304

    
305
def TestClusterInfo():
  """gnt-cluster info"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-cluster', 'info'])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
310

    
311

    
312
def TestClusterBurnin():
  """Burnin

  Uploads the burnin tool to the master node and runs it against as many
  instances as the configuration allows (option "burnin-instances",
  default 1).

  """
  master = GetMasterNode()

  # Get as many instances as we need
  instances = []
  try:
    for _ in xrange(0, cfg.get('options', {}).get('burnin-instances', 1)):
      instances.append(AcquireInstance())
  except OutOfInstancesError:
    # Best-effort: run with however many instances we managed to acquire
    print "Not enough instances, continuing anyway."

  if len(instances) < 1:
    raise Error, ("Burnin needs at least one instance")

  # Run burnin
  try:
    # The burnin script lives in the source tree, not on the nodes
    script = UploadFile(master['primary'], '../tools/burnin')
    try:
      cmd = [script, '--os=%s' % cfg['os']]
      cmd += [inst['name'] for inst in instances]
      AssertEqual(StartSSH(master['primary'],
                           utils.ShellQuoteArgs(cmd)).wait(), 0)
    finally:
      # Always remove the uploaded script again
      cmd = ['rm', '-f', script]
      AssertEqual(StartSSH(master['primary'],
                           utils.ShellQuoteArgs(cmd)).wait(), 0)
  finally:
    # Release instances even if burnin failed
    for inst in instances:
      ReleaseInstance(inst)
342

    
343

    
344
def TestClusterMasterFailover():
  """gnt-cluster masterfailover"""
  master = GetMasterNode()
  failovermaster = AcquireNode(exclude=master)

  cmd = utils.ShellQuoteArgs(['gnt-cluster', 'masterfailover'])
  try:
    # Fail over to the other node ...
    AssertEqual(StartSSH(failovermaster['primary'], cmd).wait(), 0)

    # ... and back to the original master
    AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
  finally:
    ReleaseNode(failovermaster)
359

    
360

    
361
def TestClusterDestroy():
  """gnt-cluster destroy"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-cluster', 'destroy', '--yes-do-it'])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
# }}}
367

    
368
# {{{ Node tests
def _NodeAdd(node):
  """Adds a single node to the cluster and marks it as added."""
  if node.get('_added', False):
    raise Error("Node %s already in cluster" % node['primary'])

  addcmd = ['gnt-node', 'add']
  secondary = node.get('secondary', None)
  if secondary:
    addcmd.append('--secondary-ip=%s' % secondary)
  addcmd.append(node['primary'])

  master = GetMasterNode()
  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(addcmd)).wait(), 0)

  node['_added'] = True
381

    
382

    
383
def TestNodeAddAll():
  """Adding all nodes to cluster."""
  master = GetMasterNode()
  # The master is part of the cluster since "gnt-cluster init"
  for node in cfg['nodes']:
    if node == master:
      continue
    _NodeAdd(node)
389

    
390

    
391
def _NodeRemove(node):
  """Removes a single node from the cluster and marks it as removed."""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-node', 'remove', node['primary']])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
  node['_added'] = False
396

    
397

    
398
def TestNodeRemoveAll():
  """Removing all nodes from cluster."""
  master = GetMasterNode()
  # The master itself cannot be removed
  for node in cfg['nodes']:
    if node == master:
      continue
    _NodeRemove(node)
404

    
405

    
406
def TestNodeInfo():
  """gnt-node info"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-node', 'info'])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
# }}}
412

    
413
# {{{ Instance tests
def _DiskTest(node, instance, args):
  """Adds an instance on "node" with the given extra arguments.

  Args:
    node: Node entry to create the instance on
    instance: Instance entry (from AcquireInstance)
    args: Extra command-line arguments (e.g. disk template), or None

  Returns:
    The instance entry.
  """
  cmd = ['gnt-instance', 'add',
         '--os-type=%s' % cfg['os'],
         '--os-size=%s' % cfg['os-size'],
         '--swap-size=%s' % cfg['swap-size'],
         '--memory=%s' % cfg['mem'],
         '--node=%s' % node['primary']]
  if args:
    cmd.extend(args)
  cmd.append(instance['name'])

  AssertEqual(StartSSH(GetMasterNode()['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)
  return instance
428

    
429

    
430
def TestInstanceAddWithPlainDisk(node):
  """gnt-instance add -t plain"""
  instance = AcquireInstance()
  return _DiskTest(node, instance, ['--disk-template=plain'])
433

    
434

    
435
def TestInstanceAddWithLocalMirrorDisk(node):
  """gnt-instance add -t local_raid1"""
  instance = AcquireInstance()
  return _DiskTest(node, instance, ['--disk-template=local_raid1'])
438

    
439

    
440
def TestInstanceAddWithRemoteRaidDisk(node, node2):
  """gnt-instance add -t remote_raid1"""
  instance = AcquireInstance()
  # node2 serves as the secondary for the mirrored disk
  return _DiskTest(node, instance,
                   ['--disk-template=remote_raid1',
                    '--secondary-node=%s' % node2['primary']])
445

    
446

    
447
def TestInstanceRemove(instance):
  """gnt-instance remove"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-instance', 'remove', '-f',
                              instance['name']])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)

  # The instance entry may be reused by later tests
  ReleaseInstance(instance)
454

    
455

    
456
def TestInstanceStartup(instance):
  """gnt-instance startup"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-instance', 'startup', instance['name']])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
461

    
462

    
463
def TestInstanceShutdown(instance):
  """gnt-instance shutdown"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-instance', 'shutdown', instance['name']])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
468

    
469

    
470
def TestInstanceFailover(instance):
  """gnt-instance failover"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-instance', 'failover', '--force',
                              instance['name']])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
475

    
476

    
477
def TestInstanceInfo(instance):
  """gnt-instance info"""
  master = GetMasterNode()
  cmd = utils.ShellQuoteArgs(['gnt-instance', 'info', instance['name']])
  AssertEqual(StartSSH(master['primary'], cmd).wait(), 0)
# }}}
483

    
484
# {{{ Daemon tests
def _ResolveInstanceName(instance):
  """Gets the full Xen name of an instance.

  Extracts the "Instance name" line from "gnt-instance info" output on
  the master node.

  """
  master = GetMasterNode()

  info_cmd = utils.ShellQuoteArgs(['gnt-instance', 'info', instance['name']])
  sed_cmd = utils.ShellQuoteArgs(['sed', '-n', '-e', 's/^Instance name: *//p'])

  p = subprocess.Popen(GetSSHCommand(master['primary'],
                                     '%s | %s' % (info_cmd, sed_cmd)),
                       shell=False, stdout=subprocess.PIPE)
  AssertEqual(p.wait(), 0)

  return p.stdout.read().strip()
500

    
501

    
502
def _InstanceRunning(node, name):
  """Checks whether an instance is running.

  Args:
    node: Node the instance runs on
    name: Full name of Xen instance

  Returns:
    True if "xm list" on the node knows about the instance.
  """
  # "xm list <name>" exits non-zero when the domain doesn't exist
  cmd = utils.ShellQuoteArgs(['xm', 'list', name]) + ' >/dev/null'
  return StartSSH(node['primary'], cmd).wait() == 0
512

    
513

    
514
def _XmShutdownInstance(node, name):
  """Shuts down instance using "xm" and waits for completion.

  Args:
    node: Node the instance runs on
    name: Full name of Xen instance

  Raises:
    Error: if the instance is still running after one minute
  """
  # "xm" only controls local Xen domains, so the shutdown must be run on
  # the node hosting the instance, not on the master (the code used to
  # run it on the master, which only worked for instances living there).
  cmd = ['xm', 'shutdown', name]
  AssertEqual(StartSSH(node['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)

  # Wait up to a minute
  end = time.time() + 60
  while time.time() <= end:
    if not _InstanceRunning(node, name):
      break
    time.sleep(5)
  else:
    # while/else: reached only when the timeout expired without a break
    raise Error("xm shutdown failed")
533

    
534

    
535
def _ResetWatcherDaemon(node):
  """Removes the watcher daemon's state file.

  Args:
    node: Node to be reset
  """
  cmd = utils.ShellQuoteArgs(['rm', '-f', constants.WATCHER_STATEFILE])
  AssertEqual(StartSSH(node['primary'], cmd).wait(), 0)
544

    
545

    
546
def TestInstanceAutomaticRestart(node, instance):
  """Test automatic restart of instance by ganeti-watcher.

  Note: takes up to 6 minutes to complete.

  Args:
    node: Node the instance runs on
    instance: Instance entry to test with
  """
  master = GetMasterNode()
  inst_name = _ResolveInstanceName(instance)

  # Clear the watcher's state so previous runs don't influence this test
  _ResetWatcherDaemon(node)
  _XmShutdownInstance(node, inst_name)

  # Give it a bit more than five minutes to start again
  restart_at = time.time() + 330

  # Wait until it's running again
  while time.time() <= restart_at:
    if _InstanceRunning(node, inst_name):
      break
    time.sleep(15)
  else:
    # while/else: reached only when the timeout expired without a break
    raise Error, ("Daemon didn't restart instance in time")

  cmd = ['gnt-instance', 'info', inst_name]
  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)
571

    
572

    
573
def TestInstanceConsecutiveFailures(node, instance):
  """Test five consecutive instance failures.

  The watcher is expected to give up restarting an instance after
  repeated failures; afterwards the instance must stay down.

  Note: takes at least 35 minutes to complete.

  Args:
    node: Node the instance runs on
    instance: Instance entry to test with
  """
  master = GetMasterNode()
  inst_name = _ResolveInstanceName(instance)

  # Clear the watcher's state so previous runs don't influence this test
  _ResetWatcherDaemon(node)
  _XmShutdownInstance(node, inst_name)

  # Keep shutting the instance down for 35 minutes, so the watcher sees
  # repeated failures
  finished_at = time.time() + (35 * 60)

  while time.time() <= finished_at:
    if _InstanceRunning(node, inst_name):
      _XmShutdownInstance(node, inst_name)
    time.sleep(30)

  # Check for some time whether the instance doesn't start again
  check_until = time.time() + 330
  while time.time() <= check_until:
    if _InstanceRunning(node, inst_name):
      raise Error, ("Instance started when it shouldn't")
    time.sleep(30)

  cmd = ['gnt-instance', 'info', inst_name]
  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)
# }}}
603

    
604
# {{{ Main program
if __name__ == '__main__':
  # {{{ Option parsing
  parser = OptionParser(usage="%prog [options] <configfile>")
  # NOTE(review): --cleanup is parsed but not referenced anywhere in this
  # file -- verify whether it is still meant to do something.
  parser.add_option('--cleanup', dest='cleanup',
      action="store_true",
      help="Clean up cluster after testing?")
  parser.add_option('--dry-run', dest='dry_run',
      action="store_true",
      help="Show what would be done")
  parser.add_option('--verbose', dest='verbose',
      action="store_true",
      help="Verbose output")
  parser.add_option('--yes-do-it', dest='yes_do_it',
      action="store_true",
      help="Really execute the tests")
  (options, args) = parser.parse_args()
  # }}}

  # Exactly one positional argument: the QA configuration file
  if len(args) == 1:
    config_file = args[0]
  else:
    raise SyntaxError, ("Exactly one configuration file is expected")

  # Safety check: these tests wipe the Ganeti configuration on all nodes
  if not options.yes_do_it:
    print ("Executing this script irreversibly destroys any Ganeti\n"
           "configuration on all nodes involved. If you really want\n"
           "to start testing, supply the --yes-do-it option.")
    sys.exit(1)

  # NOTE(review): yaml.load can instantiate arbitrary Python objects from
  # the config file; yaml.safe_load would be safer if the config ever
  # comes from an untrusted source.
  f = open(config_file, 'r')
  try:
    cfg = yaml.load(f.read())
  finally:
    f.close()

  # Sanity-check the configuration before touching any node
  RunTest(TestConfig)

  if TestEnabled('env'):
    RunTest(TestSshConnection)
    RunTest(TestIcmpPing)
    RunTest(TestGanetiCommands)

  RunTest(TestClusterInit)

  if TestEnabled('cluster-verify'):
    RunTest(TestClusterVerify)

  if TestEnabled('cluster-info'):
    RunTest(TestClusterInfo)

  RunTest(TestNodeAddAll)

  if TestEnabled('node-info'):
    RunTest(TestNodeInfo)

  if TestEnabled('cluster-burnin'):
    RunTest(TestClusterBurnin)

  if TestEnabled('cluster-master-failover'):
    RunTest(TestClusterMasterFailover)

  # Instance tests: each disk template gets its own add/shutdown/startup/
  # remove cycle on the least-used node
  node = AcquireNode()
  try:
    if TestEnabled('instance-add-plain-disk'):
      instance = RunTest(TestInstanceAddWithPlainDisk, node)
      RunTest(TestInstanceShutdown, instance)
      RunTest(TestInstanceStartup, instance)

      if TestEnabled('instance-info'):
        RunTest(TestInstanceInfo, instance)

      if TestEnabled('instance-automatic-restart'):
        RunTest(TestInstanceAutomaticRestart, node, instance)

      if TestEnabled('instance-consecutive-failures'):
        RunTest(TestInstanceConsecutiveFailures, node, instance)

      RunTest(TestInstanceRemove, instance)
      del instance

    if TestEnabled('instance-add-local-mirror-disk'):
      instance = RunTest(TestInstanceAddWithLocalMirrorDisk, node)
      RunTest(TestInstanceShutdown, instance)
      RunTest(TestInstanceStartup, instance)

      if TestEnabled('instance-info'):
        RunTest(TestInstanceInfo, instance)

      RunTest(TestInstanceRemove, instance)
      del instance

    if TestEnabled('instance-add-remote-raid-disk'):
      # Mirrored disks need a second node as the secondary
      node2 = AcquireNode(exclude=node)
      try:
        instance = RunTest(TestInstanceAddWithRemoteRaidDisk, node, node2)
        RunTest(TestInstanceShutdown, instance)
        RunTest(TestInstanceStartup, instance)

        if TestEnabled('instance-info'):
          RunTest(TestInstanceInfo, instance)

        if TestEnabled('instance-failover'):
          RunTest(TestInstanceFailover, instance)

        RunTest(TestInstanceRemove, instance)
        del instance
      finally:
        ReleaseNode(node2)

  finally:
    ReleaseNode(node)

  RunTest(TestNodeRemoveAll)

  if TestEnabled('cluster-destroy'):
    RunTest(TestClusterDestroy)
# }}}

# vim: foldmethod=marker :