Rename OpActivateInstanceDisks and LUActivateInstanceDisks
diff --git a/tools/burnin b/tools/burnin
index a1dfc91..3adbe83 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 #
 
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -36,6 +36,8 @@ from ganeti import constants
 from ganeti import cli
 from ganeti import errors
 from ganeti import utils
+from ganeti import hypervisor
+from ganeti import compat
 
 from ganeti.confd import client as confd_client
 
@@ -111,6 +113,8 @@ OPTIONS = [
                  help="OS to use during burnin",
                  metavar="<OS>",
                  completion_suggest=cli.OPT_COMPL_ONE_OS),
+  cli.HYPERVISOR_OPT,
+  cli.OSPARAMS_OPT,
   cli.cli_option("--disk-size", dest="disk_size",
                  help="Disk size (determines disk count)",
                  default="128m", type="string", metavar="<size,size,...>",
@@ -154,6 +158,8 @@ OPTIONS = [
   cli.cli_option("--no-reboot", dest="do_reboot",
                  help="Skip instance reboot", action="store_false",
                  default=True),
+  cli.cli_option("--reboot-types", dest="reboot_types",
+                 help="Specify the reboot types", default=None),
   cli.cli_option("--no-activate-disks", dest="do_activate_disks",
                  help="Skip disk activation/deactivation",
                  action="store_false", default=True),
@@ -369,7 +375,7 @@ class Burner(object):
 
   def CommitQueue(self):
     """Execute all submitted opcodes in case of parallel burnin"""
-    if not self.opts.parallel:
+    if not self.opts.parallel or not self.queued_ops:
       return
 
     if self.queue_retry:
@@ -473,7 +479,19 @@ class Burner(object):
       constants.BE_MEMORY: options.mem_size,
       constants.BE_VCPUS: 1,
       }
+
+    self.hypervisor = None
     self.hvp = {}
+    if options.hypervisor:
+      self.hypervisor, self.hvp = options.hypervisor
+
+    if options.reboot_types is None:
+      options.reboot_types = constants.REBOOT_TYPES
+    else:
+      options.reboot_types = options.reboot_types.split(",")
+      rt_diff = set(options.reboot_types).difference(constants.REBOOT_TYPES)
+      if rt_diff:
+        Err("Invalid reboot types specified: %s" % utils.CommaJoin(rt_diff))
 
     socket.setdefaulttimeout(options.net_timeout)
 
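Note on the hunk above: the unpacking `self.hypervisor, self.hvp = options.hypervisor` implies that cli.HYPERVISOR_OPT (added earlier in OPTIONS) delivers a (name, parameters) pair, and the new --reboot-types value is validated against constants.REBOOT_TYPES. A minimal sketch of both, with the kvm name and parameter values as illustrative placeholders, not taken from this diff:

    # Hedged sketch of the new option handling in Burner.__init__.
    from ganeti import constants

    # "-H kvm:kernel_path=/boot/vmlinuz" arrives as a (name, dict) tuple:
    options_hypervisor = ("kvm", {"kernel_path": "/boot/vmlinuz"})
    hv_name, hvp = options_hypervisor

    # "--reboot-types=soft,hard" must be a subset of REBOOT_TYPES
    # ("soft", "hard", "full" in this era); anything else triggers Err():
    reboot_types = "soft,hard".split(",")
    rt_diff = set(reboot_types).difference(constants.REBOOT_TYPES)
    assert not rt_diff  # a value like "warm" would end up in rt_diff
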
@@ -492,16 +510,18 @@ class Burner(object):
       Err(msg, exit_code=err_code)
     self.nodes = [data[0] for data in result if not (data[1] or data[2])]
 
-    op_diagnose = opcodes.OpDiagnoseOS(output_fields=["name", "valid",
-                                                      "variants"], names=[])
+    op_diagnose = opcodes.OpDiagnoseOS(output_fields=["name",
+                                                      "variants",
+                                                      "hidden"],
+                                       names=[])
     result = self.ExecOp(True, op_diagnose)
 
     if not result:
       Err("Can't get the OS list")
 
     found = False
-    for (name, valid, variants) in result:
-      if valid and self.opts.os in cli.CalculateOSNames(name, variants):
+    for (name, variants, _) in result:
+      if self.opts.os in cli.CalculateOSNames(name, variants):
         found = True
         break
 
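The OS check above no longer requests the per-OS "valid" flag; the match now relies purely on expanding each OS name with its variants. A hedged sketch of what cli.CalculateOSNames is assumed to return (the helper name and OS names here are illustrative):

    # For an OS with variants, only the "name+variant" forms are selectable:
    def _calc_os_names(os_name, os_variants):
        if os_variants:
            return ["%s+%s" % (os_name, v) for v in os_variants]
        return [os_name]

    _calc_os_names("debootstrap", ["etch", "lenny"])
    # -> ["debootstrap+etch", "debootstrap+lenny"]
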
@@ -515,6 +535,9 @@ class Burner(object):
 
     default_nic_params = self.cluster_info["nicparams"][constants.PP_DEFAULT]
     self.cluster_default_nicparams = default_nic_params
+    if self.hypervisor is None:
+      self.hypervisor = self.cluster_info["default_hypervisor"]
+    self.hv_class = hypervisor.GetHypervisorClass(self.hypervisor)
 
   @_DoCheckInstances
   @_DoBatch(False)
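hypervisor.GetHypervisorClass maps the (possibly cluster-default) hypervisor name to its implementation class; the class-level CAN_MIGRATE attribute is what gates BurnMigrate in the last hunk below. A small sketch, using kvm as an example name:

    from ganeti import constants, hypervisor

    hv_class = hypervisor.GetHypervisorClass(constants.HT_KVM)
    can_migrate = hv_class.CAN_MIGRATE  # False for non-migratable hypervisors
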
@@ -559,6 +582,8 @@ class Burner(object):
                                     iallocator=self.opts.iallocator,
                                     beparams=self.bep,
                                     hvparams=self.hvp,
+                                    hypervisor=self.hypervisor,
+                                    osparams=self.opts.osparams,
                                     )
       remove_instance = lambda name: lambda: self.to_rem.append(name)
       self.ExecOrQueue(instance, [op], post_process=remove_instance(instance))
@@ -646,10 +671,10 @@ class Burner(object):
     Log("Migrating instances")
     for instance in self.instances:
       Log("instance %s", instance, indent=1)
-      op1 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
+      op1 = opcodes.OpMigrateInstance(instance_name=instance, mode=None,
                                       cleanup=False)
 
-      op2 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
+      op2 = opcodes.OpMigrateInstance(instance_name=instance, mode=None,
                                       cleanup=True)
       Log("migration and migration cleanup", indent=2)
       self.ExecOrQueue(instance, [op1, op2])
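Here the old boolean live=True is replaced by the newer mode parameter; passing mode=None defers the live/non-live decision to the cluster instead of burnin forcing a live migration. Illustrative only (the instance name is a placeholder):

    op = opcodes.OpMigrateInstance(instance_name="instance1.example.com",
                                   mode=None, cleanup=False)
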
@@ -686,9 +711,10 @@ class Burner(object):
         import_log_msg = ("import from %s to %s, %s" %
                           (enode, pnode, snode))
 
-      exp_op = opcodes.OpExportInstance(instance_name=instance,
-                                           target_node=enode,
-                                           shutdown=True)
+      exp_op = opcodes.OpBackupExport(instance_name=instance,
+                                      target_node=enode,
+                                      mode=constants.EXPORT_MODE_LOCAL,
+                                      shutdown=True)
       rem_op = opcodes.OpRemoveInstance(instance_name=instance,
                                         ignore_failures=True)
       imp_dir = utils.PathJoin(constants.EXPORT_DIR, full_name)
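The export opcodes move into the OpBackup* namespace, and the export mode must now be spelled out; burnin keeps its old behaviour by requesting a local export (a remote mode, constants.EXPORT_MODE_REMOTE, also exists at this point, as I understand it). Sketch with a placeholder instance name:

    # Old -> new opcode names in burnin's export/import cycle:
    #   OpExportInstance -> OpBackupExport  (mode now explicit)
    #   OpRemoveExport   -> OpBackupRemove
    erem_op = opcodes.OpBackupRemove(instance_name="instance1.example.com")
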
@@ -711,9 +737,10 @@ class Burner(object):
                                         iallocator=self.opts.iallocator,
                                         beparams=self.bep,
                                         hvparams=self.hvp,
+                                        osparams=self.opts.osparams,
                                         )
 
-      erem_op = opcodes.OpRemoveExport(instance_name=instance)
+      erem_op = opcodes.OpBackupRemove(instance_name=instance)
 
       Log("export to node %s", enode, indent=2)
       Log("remove instance", indent=2)
@@ -804,7 +831,7 @@ class Burner(object):
     for instance in self.instances:
       Log("instance %s", instance, indent=1)
       ops = []
-      for reboot_type in constants.REBOOT_TYPES:
+      for reboot_type in self.opts.reboot_types:
         op = opcodes.OpRebootInstance(instance_name=instance,
                                       reboot_type=reboot_type,
                                       ignore_secondaries=False)
@@ -820,7 +847,7 @@ class Burner(object):
     for instance in self.instances:
       Log("instance %s", instance, indent=1)
       op_start = self.StartInstanceOp(instance)
-      op_act = opcodes.OpActivateInstanceDisks(instance_name=instance)
+      op_act = opcodes.OpInstanceActivateDisks(instance_name=instance)
       op_deact = opcodes.OpDeactivateInstanceDisks(instance_name=instance)
       op_stop = self.StopInstanceOp(instance)
       Log("activate disks when online", indent=2)
@@ -972,14 +999,20 @@ class Burner(object):
         self.BurnReplaceDisks2()
 
       if (opts.disk_template in constants.DTS_GROWABLE and
-          utils.any(self.disk_growth, lambda n: n > 0)):
+          compat.any(n > 0 for n in self.disk_growth)):
         self.BurnGrowDisks()
 
       if opts.do_failover and opts.disk_template in constants.DTS_NET_MIRROR:
         self.BurnFailover()
 
-      if opts.do_migrate and opts.disk_template == constants.DT_DRBD8:
-        self.BurnMigrate()
+      if opts.do_migrate:
+        if opts.disk_template != constants.DT_DRBD8:
+          Log("Skipping migration (disk template not DRBD8)")
+        elif not self.hv_class.CAN_MIGRATE:
+          Log("Skipping migration (hypervisor %s does not support it)",
+              self.hypervisor)
+        else:
+          self.BurnMigrate()
 
       if (opts.do_move and len(self.nodes) > 1 and
           opts.disk_template in [constants.DT_PLAIN, constants.DT_FILE]):
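
Taken together, a hypothetical invocation exercising the new options (node, OS, and parameter values are placeholders, assuming the standard ganeti option spellings -H and -O):

    ./burnin -o debootstrap+etch -t drbd \
        -H kvm:kernel_path=/boot/vmlinuz \
        -O dhcp=yes \
        --reboot-types=soft,hard \
        node1.example.com node2.example.com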