Update NEWS for 2.5
diff --git a/tools/burnin b/tools/burnin
index 7722c02..7d28c36 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -126,6 +126,9 @@ OPTIONS = [
                  default=128, type="unit", metavar="<size>",
                  completion_suggest=("128M 256M 512M 1G 4G 8G"
                                      " 12G 16G").split()),
+  cli.cli_option("--vcpu-count", dest="vcpu_count", help="VCPU count",
+                 default=3, type="unit", metavar="<count>",
+                 completion_suggest=("1 2 3 4").split()),
   cli.DEBUG_OPT,
   cli.VERBOSE_OPT,
   cli.NOIPCHECK_OPT,
@@ -182,7 +185,8 @@ OPTIONS = [
   cli.cli_option("-t", "--disk-template", dest="disk_template",
                  choices=list(constants.DISK_TEMPLATES),
                  default=constants.DT_DRBD8,
-                 help="Disk template (diskless, file, plain or drbd) [drbd]"),
+                 help="Disk template (diskless, file, plain, sharedfile"
+                 " or drbd) [drbd]"),
   cli.cli_option("-n", "--nodes", dest="nodes", default="",
                  help=("Comma separated list of nodes to perform"
                        " the burnin on (defaults to all nodes)"),
@@ -258,7 +262,6 @@ class Burner(object):
 
   def __init__(self):
     """Constructor."""
-    utils.SetupLogging(constants.LOG_BURNIN, debug=False, stderr_logging=True)
     self.url_opener = SimpleOpener()
     self._feed_buf = StringIO()
     self.nodes = []
@@ -320,11 +323,6 @@ class Burner(object):
             msg, MAX_RETRIES - retry_count + 1, MAX_RETRIES, err)
         self.MaybeRetry(retry_count - 1, msg, fn, *args)
 
-  def _SetDebug(self, ops):
-    """Set the debug value on the given opcodes"""
-    for op in ops:
-      op.debug_level = self.opts.debug
-
   def _ExecOp(self, *ops):
     """Execute one or more opcodes and manage the exec buffer.
 
@@ -350,13 +348,13 @@ class Burner(object):
       rval = MAX_RETRIES
     else:
       rval = 0
-    self._SetDebug(ops)
+    cli.SetGenericOpcodeOpts(ops, self.opts)
     return self.MaybeRetry(rval, "opcode", self._ExecOp, *ops)
 
   def ExecOrQueue(self, name, ops, post_process=None):
     """Execute an opcode and manage the exec buffer."""
     if self.opts.parallel:
-      self._SetDebug(ops)
+      cli.SetGenericOpcodeOpts(ops, self.opts)
       self.queued_ops.append((ops, name, post_process))
     else:
       val = self.ExecOp(self.queue_retry, *ops) # pylint: disable-msg=W0142
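The two hunks above drop burnin's private _SetDebug helper in favour of cli.SetGenericOpcodeOpts, which applies the generic command-line options to each opcode before submission rather than only the debug level. A minimal sketch of what that call amounts to, assuming the usual debug/dry-run/priority options are present on the parsed options object (an approximation for illustration, not the actual cli implementation):

def SetGenericOpcodeOptsSketch(ops, opts):
  # Hypothetical stand-in for cli.SetGenericOpcodeOpts, illustration only.
  for op in ops:
    op.debug_level = opts.debug              # what _SetDebug used to do
    if hasattr(opts, "dry_run"):
      op.dry_run = opts.dry_run              # assumed generic option
    if getattr(opts, "priority", None) is not None:
      op.priority = opts.priority            # assumed; the real helper maps names to values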
@@ -445,6 +443,7 @@ class Burner(object):
 
     supported_disk_templates = (constants.DT_DISKLESS,
                                 constants.DT_FILE,
+                                constants.DT_SHARED_FILE,
                                 constants.DT_PLAIN,
                                 constants.DT_DRBD8)
     if options.disk_template not in supported_disk_templates:
@@ -477,7 +476,7 @@ class Burner(object):
     self.instances = args
     self.bep = {
       constants.BE_MEMORY: options.mem_size,
-      constants.BE_VCPUS: 1,
+      constants.BE_VCPUS: options.vcpu_count,
       }
 
     self.hypervisor = None
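The VCPU count is no longer hard-coded to 1; both backend parameters now come from the command line, and burnin later forwards this dict as the beparams of the instances it creates. With the defaults above it amounts to (illustrative values only):

# Resulting backend parameters with the default options.
bep = {
  constants.BE_MEMORY: 128,   # MiB, from --mem-size
  constants.BE_VCPUS: 3,      # from the new --vcpu-count (previously fixed at 1)
}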
@@ -502,15 +501,15 @@ class Burner(object):
     else:
       names = []
     try:
-      op = opcodes.OpQueryNodes(output_fields=["name", "offline", "drained"],
-                                names=names, use_locking=True)
+      op = opcodes.OpNodeQuery(output_fields=["name", "offline", "drained"],
+                               names=names, use_locking=True)
       result = self.ExecOp(True, op)
     except errors.GenericError, err:
       err_code, msg = cli.FormatError(err)
       Err(msg, exit_code=err_code)
     self.nodes = [data[0] for data in result if not (data[1] or data[2])]
 
-    op_diagnose = opcodes.OpDiagnoseOS(output_fields=["name",
+    op_diagnose = opcodes.OpOsDiagnose(output_fields=["name",
                                                       "variants",
                                                       "hidden"],
                                        names=[])
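The renamed OpNodeQuery is asked for the offline and drained flags alongside the name, and the list comprehension above keeps only nodes that are usable for the burnin. An equivalent, more explicit spelling of that filter (the helper name is made up for illustration):

def UsableNodes(result):
  # Each query row is [name, offline, drained]; keep nodes that are
  # neither offline nor drained.
  usable = []
  for name, offline, drained in result:
    if not offline and not drained:
      usable.append(name)
  return usable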
@@ -556,7 +555,7 @@ class Burner(object):
       if self.opts.iallocator:
         pnode = snode = None
         msg = "with iallocator %s" % self.opts.iallocator
-      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
+      elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
         snode = None
         msg = "on %s" % pnode
       else:
@@ -605,14 +604,15 @@ class Burner(object):
   def BurnReplaceDisks1D8(self):
     """Replace disks on primary and secondary for drbd8."""
     Log("Replacing disks on the same nodes")
+    early_release = self.opts.early_release
     for instance in self.instances:
       Log("instance %s", instance, indent=1)
       ops = []
       for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI:
-        op = opcodes.OpReplaceDisks(instance_name=instance,
-                                    mode=mode,
-                                    disks=[i for i in range(self.disk_count)],
-                                    early_release=self.opts.early_release)
+        op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
+                                            mode=mode,
+                                            disks=list(range(self.disk_count)),
+                                            early_release=early_release)
         Log("run %s", mode, indent=2)
         ops.append(op)
       self.ExecOrQueue(instance, ops)
@@ -632,12 +632,12 @@ class Burner(object):
         msg = "with iallocator %s" % self.opts.iallocator
       else:
         msg = tnode
-      op = opcodes.OpReplaceDisks(instance_name=instance,
-                                  mode=mode,
-                                  remote_node=tnode,
-                                  iallocator=self.opts.iallocator,
-                                  disks=[],
-                                  early_release=self.opts.early_release)
+      op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
+                                          mode=mode,
+                                          remote_node=tnode,
+                                          iallocator=self.opts.iallocator,
+                                          disks=[],
+                                          early_release=self.opts.early_release)
       Log("run %s %s", mode, msg, indent=2)
       self.ExecOrQueue(instance, [op])
 
@@ -703,7 +703,7 @@ class Burner(object):
         import_log_msg = ("import from %s"
                           " with iallocator %s" %
                           (enode, self.opts.iallocator))
-      elif self.opts.disk_template not in constants.DTS_NET_MIRROR:
+      elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
         snode = None
         import_log_msg = ("import from %s to %s" %
                           (enode, pnode))
@@ -751,17 +751,17 @@ class Burner(object):
   @staticmethod
   def StopInstanceOp(instance):
     """Stop given instance."""
-    return opcodes.OpShutdownInstance(instance_name=instance)
+    return opcodes.OpInstanceShutdown(instance_name=instance)
 
   @staticmethod
   def StartInstanceOp(instance):
     """Start given instance."""
-    return opcodes.OpStartupInstance(instance_name=instance, force=False)
+    return opcodes.OpInstanceStartup(instance_name=instance, force=False)
 
   @staticmethod
   def RenameInstanceOp(instance, instance_new):
     """Rename instance."""
-    return opcodes.OpRenameInstance(instance_name=instance,
+    return opcodes.OpInstanceRename(instance_name=instance,
                                     new_name=instance_new)
 
   @_DoCheckInstances
@@ -862,10 +862,10 @@ class Burner(object):
     Log("Adding and removing disks")
     for instance in self.instances:
       Log("instance %s", instance, indent=1)
-      op_add = opcodes.OpSetInstanceParams(\
+      op_add = opcodes.OpInstanceSetParams(\
         instance_name=instance,
         disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
-      op_rem = opcodes.OpSetInstanceParams(\
+      op_rem = opcodes.OpInstanceSetParams(\
         instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
       op_stop = self.StopInstanceOp(instance)
       op_start = self.StartInstanceOp(instance)
@@ -879,9 +879,9 @@ class Burner(object):
     Log("Adding and removing NICs")
     for instance in self.instances:
       Log("instance %s", instance, indent=1)
-      op_add = opcodes.OpSetInstanceParams(\
+      op_add = opcodes.OpInstanceSetParams(\
         instance_name=instance, nics=[(constants.DDM_ADD, {})])
-      op_rem = opcodes.OpSetInstanceParams(\
+      op_rem = opcodes.OpInstanceSetParams(\
         instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
       Log("adding a NIC", indent=2)
       Log("removing last NIC", indent=2)
@@ -985,29 +985,31 @@ class Burner(object):
 
     if (len(self.nodes) == 1 and
         opts.disk_template not in (constants.DT_DISKLESS, constants.DT_PLAIN,
-                                   constants.DT_FILE)):
+                                   constants.DT_FILE,
+                                   constants.DT_SHARED_FILE)):
       Err("When one node is available/selected the disk template must"
           " be 'diskless', 'file' or 'plain'")
 
     has_err = True
     try:
       self.BurnCreateInstances()
-      if opts.do_replace1 and opts.disk_template in constants.DTS_NET_MIRROR:
+      if opts.do_replace1 and opts.disk_template in constants.DTS_INT_MIRROR:
         self.BurnReplaceDisks1D8()
       if (opts.do_replace2 and len(self.nodes) > 2 and
-          opts.disk_template in constants.DTS_NET_MIRROR) :
+          opts.disk_template in constants.DTS_INT_MIRROR):
         self.BurnReplaceDisks2()
 
       if (opts.disk_template in constants.DTS_GROWABLE and
           compat.any(n > 0 for n in self.disk_growth)):
         self.BurnGrowDisks()
 
-      if opts.do_failover and opts.disk_template in constants.DTS_NET_MIRROR:
+      if opts.do_failover and opts.disk_template in constants.DTS_MIRRORED:
         self.BurnFailover()
 
       if opts.do_migrate:
-        if opts.disk_template != constants.DT_DRBD8:
-          Log("Skipping migration (disk template not DRBD8)")
+        if opts.disk_template not in constants.DTS_MIRRORED:
+          Log("Skipping migration (disk template %s does not support it)",
+              opts.disk_template)
         elif not self.hv_class.CAN_MIGRATE:
           Log("Skipping migration (hypervisor %s does not support it)",
               self.hypervisor)
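The checks above no longer key on the old DTS_NET_MIRROR set (DRBD only): replace-disks still requires the internally mirrored templates, while failover and migration are allowed for anything in DTS_MIRRORED, which also covers externally mirrored storage such as sharedfile. A rough sketch of the assumed relationship between these sets (illustrative; the authoritative definitions live in ganeti's constants module and may list further templates):

DTS_INT_MIRROR = frozenset([DT_DRBD8])           # mirrored by Ganeti itself
DTS_EXT_MIRROR = frozenset([DT_SHARED_FILE])     # mirrored by the storage backend
DTS_MIRRORED = DTS_INT_MIRROR | DTS_EXT_MIRROR   # eligible for failover/migration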
@@ -1020,6 +1022,7 @@ class Burner(object):
 
       if (opts.do_importexport and
           opts.disk_template not in (constants.DT_DISKLESS,
+                                     constants.DT_SHARED_FILE,
                                      constants.DT_FILE)):
         self.BurnImportExport()
 
@@ -1069,14 +1072,17 @@ class Burner(object):
           else: # non-expected error
             raise
 
-    return 0
+    return constants.EXIT_SUCCESS
 
 
 def main():
-  """Main function"""
+  """Main function.
+
+  """
+  utils.SetupLogging(constants.LOG_BURNIN, sys.argv[0],
+                     debug=False, stderr_logging=True)
 
-  burner = Burner()
-  return burner.BurninCluster()
+  return Burner().BurninCluster()
 
 
 if __name__ == "__main__":