#!/usr/bin/python
#
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import cli
from ganeti import errors
from ganeti import utils
+from ganeti import hypervisor
+from ganeti import compat
from ganeti.confd import client as confd_client
help="OS to use during burnin",
metavar="<OS>",
completion_suggest=cli.OPT_COMPL_ONE_OS),
+ cli.HYPERVISOR_OPT,
+ cli.OSPARAMS_OPT,
cli.cli_option("--disk-size", dest="disk_size",
help="Disk size (determines disk count)",
default="128m", type="string", metavar="<size,size,...>",
cli.cli_option("--no-reboot", dest="do_reboot",
help="Skip instance reboot", action="store_false",
default=True),
+ cli.cli_option("--reboot-types", dest="reboot_types",
+ help="Specify the reboot types", default=None),
cli.cli_option("--no-activate-disks", dest="do_activate_disks",
help="Skip disk activation/deactivation",
action="store_false", default=True),
def _ExecOp(self, *ops):
"""Execute one or more opcodes and manage the exec buffer.
- @result: if only opcode has been passed, we return its result;
+ @return: if only one opcode has been passed, we return its result;
otherwise we return the list of results
"""
def ExecOp(self, retry, *ops):
"""Execute one or more opcodes and manage the exec buffer.
- @result: if only opcode has been passed, we return its result;
+ @return: if only one opcode has been passed, we return its result;
otherwise we return the list of results
"""
def CommitQueue(self):
"""Execute all submitted opcodes in case of parallel burnin"""
- if not self.opts.parallel:
+ if not self.opts.parallel or not self.queued_ops:
return
if self.queue_retry:
constants.BE_MEMORY: options.mem_size,
constants.BE_VCPUS: 1,
}
+
+ self.hypervisor = None
self.hvp = {}
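+ # cli.HYPERVISOR_OPT parses --hypervisor into a
+ # (hypervisor name, hvparams dict) pair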
+ if options.hypervisor:
+ self.hypervisor, self.hvp = options.hypervisor
+
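+ # Default to exercising all known reboot types; otherwise parse the
+ # user-supplied comma-separated list and reject unknown types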
+ if options.reboot_types is None:
+ options.reboot_types = constants.REBOOT_TYPES
+ else:
+ options.reboot_types = options.reboot_types.split(",")
+ rt_diff = set(options.reboot_types).difference(constants.REBOOT_TYPES)
+ if rt_diff:
+ Err("Invalid reboot types specified: %s" % utils.CommaJoin(rt_diff))
socket.setdefaulttimeout(options.net_timeout)
else:
names = []
try:
- op = opcodes.OpQueryNodes(output_fields=["name", "offline", "drained"],
- names=names, use_locking=True)
+ op = opcodes.OpNodeQuery(output_fields=["name", "offline", "drained"],
+ names=names, use_locking=True)
result = self.ExecOp(True, op)
except errors.GenericError, err:
err_code, msg = cli.FormatError(err)
Err(msg, exit_code=err_code)
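+ # keep only nodes that are neither offline nor drained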
self.nodes = [data[0] for data in result if not (data[1] or data[2])]
- op_diagnose = opcodes.OpDiagnoseOS(output_fields=["name", "valid",
- "variants"], names=[])
+ op_diagnose = opcodes.OpOsDiagnose(output_fields=["name",
+ "variants",
+ "hidden"],
+ names=[])
result = self.ExecOp(True, op_diagnose)
if not result:
Err("Can't get the OS list")
found = False
- for (name, valid, variants) in result:
- if valid and self.opts.os in cli.CalculateOSNames(name, variants):
+ for (name, variants, _) in result:
+ if self.opts.os in cli.CalculateOSNames(name, variants):
found = True
break
default_nic_params = self.cluster_info["nicparams"][constants.PP_DEFAULT]
self.cluster_default_nicparams = default_nic_params
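+ # fall back to the cluster's default hypervisor if none was given
+ # on the command line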
+ if self.hypervisor is None:
+ self.hypervisor = self.cluster_info["default_hypervisor"]
+ self.hv_class = hypervisor.GetHypervisorClass(self.hypervisor)
@_DoCheckInstances
@_DoBatch(False)
Log(msg, indent=2)
- op = opcodes.OpCreateInstance(instance_name=instance,
+ op = opcodes.OpInstanceCreate(instance_name=instance,
disks = [ {"size": size}
for size in self.disk_size],
disk_template=self.opts.disk_template,
iallocator=self.opts.iallocator,
beparams=self.bep,
hvparams=self.hvp,
+ hypervisor=self.hypervisor,
+ osparams=self.opts.osparams,
)
-
- self.ExecOrQueue(instance, [op])
- self.to_rem.append(instance)
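+ # Mark the instance for removal only after the create opcode has
+ # succeeded; calling remove_instance(instance) binds the current name
+ # so each queued post-processing hook gets its own closure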
+ remove_instance = lambda name: lambda: self.to_rem.append(name)
+ self.ExecOrQueue(instance, [op], post_process=remove_instance(instance))
@_DoBatch(False)
def BurnGrowDisks(self):
Log("instance %s", instance, indent=1)
for idx, growth in enumerate(self.disk_growth):
if growth > 0:
- op = opcodes.OpGrowDisk(instance_name=instance, disk=idx,
- amount=growth, wait_for_sync=True)
+ op = opcodes.OpInstanceGrowDisk(instance_name=instance, disk=idx,
+ amount=growth, wait_for_sync=True)
Log("increase disk/%s by %s MB", idx, growth, indent=2)
self.ExecOrQueue(instance, [op])
def BurnReplaceDisks1D8(self):
"""Replace disks on primary and secondary for drbd8."""
Log("Replacing disks on the same nodes")
+ early_release = self.opts.early_release
for instance in self.instances:
Log("instance %s", instance, indent=1)
ops = []
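+ # replace disks on the secondary node first, then on the primary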
for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI:
- op = opcodes.OpReplaceDisks(instance_name=instance,
- mode=mode,
- disks=[i for i in range(self.disk_count)],
- early_release=self.opts.early_release)
+ op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
+ mode=mode,
+ disks=list(range(self.disk_count)),
+ early_release=early_release)
Log("run %s", mode, indent=2)
ops.append(op)
self.ExecOrQueue(instance, ops)
msg = "with iallocator %s" % self.opts.iallocator
else:
msg = tnode
- op = opcodes.OpReplaceDisks(instance_name=instance,
- mode=mode,
- remote_node=tnode,
- iallocator=self.opts.iallocator,
- disks=[],
- early_release=self.opts.early_release)
+ op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
+ mode=mode,
+ remote_node=tnode,
+ iallocator=self.opts.iallocator,
+ disks=[],
+ early_release=self.opts.early_release)
Log("run %s %s", mode, msg, indent=2)
self.ExecOrQueue(instance, [op])
Log("Failing over instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
- op = opcodes.OpFailoverInstance(instance_name=instance,
+ op = opcodes.OpInstanceFailover(instance_name=instance,
ignore_consistency=False)
self.ExecOrQueue(instance, [op])
self.instances)
for tnode, instance in mytor:
Log("instance %s", instance, indent=1)
- op = opcodes.OpMoveInstance(instance_name=instance,
+ op = opcodes.OpInstanceMove(instance_name=instance,
target_node=tnode)
self.ExecOrQueue(instance, [op])
Log("Migrating instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
- op1 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
+ op1 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None,
cleanup=False)
- op2 = opcodes.OpMigrateInstance(instance_name=instance, live=True,
+ op2 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None,
cleanup=True)
Log("migration and migration cleanup", indent=2)
self.ExecOrQueue(instance, [op1, op2])
for pnode, snode, enode, instance in mytor:
Log("instance %s", instance, indent=1)
# read the full name of the instance
- nam_op = opcodes.OpQueryInstances(output_fields=["name"],
- names=[instance], use_locking=True)
+ nam_op = opcodes.OpInstanceQuery(output_fields=["name"],
+ names=[instance], use_locking=True)
full_name = self.ExecOp(False, nam_op)[0][0]
if self.opts.iallocator:
import_log_msg = ("import from %s to %s, %s" %
(enode, pnode, snode))
- exp_op = opcodes.OpExportInstance(instance_name=instance,
- target_node=enode,
- shutdown=True)
- rem_op = opcodes.OpRemoveInstance(instance_name=instance,
+ exp_op = opcodes.OpBackupExport(instance_name=instance,
+ target_node=enode,
+ mode=constants.EXPORT_MODE_LOCAL,
+ shutdown=True)
+ rem_op = opcodes.OpInstanceRemove(instance_name=instance,
ignore_failures=True)
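+ # local exports are written under EXPORT_DIR/<full instance name>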
imp_dir = utils.PathJoin(constants.EXPORT_DIR, full_name)
- imp_op = opcodes.OpCreateInstance(instance_name=instance,
+ imp_op = opcodes.OpInstanceCreate(instance_name=instance,
disks = [ {"size": size}
for size in self.disk_size],
disk_template=self.opts.disk_template,
iallocator=self.opts.iallocator,
beparams=self.bep,
hvparams=self.hvp,
+ osparams=self.opts.osparams,
)
- erem_op = opcodes.OpRemoveExport(instance_name=instance)
+ erem_op = opcodes.OpBackupRemove(instance_name=instance)
Log("export to node %s", enode, indent=2)
Log("remove instance", indent=2)
@staticmethod
def StopInstanceOp(instance):
"""Stop given instance."""
- return opcodes.OpShutdownInstance(instance_name=instance)
+ return opcodes.OpInstanceShutdown(instance_name=instance)
@staticmethod
def StartInstanceOp(instance):
"""Start given instance."""
- return opcodes.OpStartupInstance(instance_name=instance, force=False)
+ return opcodes.OpInstanceStartup(instance_name=instance, force=False)
@staticmethod
def RenameInstanceOp(instance, instance_new):
"""Rename instance."""
- return opcodes.OpRenameInstance(instance_name=instance,
+ return opcodes.OpInstanceRename(instance_name=instance,
new_name=instance_new)
@_DoCheckInstances
Log("Removing instances")
for instance in self.to_rem:
Log("instance %s", instance, indent=1)
- op = opcodes.OpRemoveInstance(instance_name=instance,
+ op = opcodes.OpInstanceRemove(instance_name=instance,
ignore_failures=True)
self.ExecOrQueue(instance, [op])
for instance in self.instances:
Log("instance %s", instance, indent=1)
op1 = self.StopInstanceOp(instance)
- op2 = opcodes.OpReinstallInstance(instance_name=instance)
+ op2 = opcodes.OpInstanceReinstall(instance_name=instance)
Log("reinstall without passing the OS", indent=2)
- op3 = opcodes.OpReinstallInstance(instance_name=instance,
+ op3 = opcodes.OpInstanceReinstall(instance_name=instance,
os_type=self.opts.os)
Log("reinstall specifying the OS", indent=2)
op4 = self.StartInstanceOp(instance)
for instance in self.instances:
Log("instance %s", instance, indent=1)
ops = []
- for reboot_type in constants.REBOOT_TYPES:
- op = opcodes.OpRebootInstance(instance_name=instance,
+ for reboot_type in self.opts.reboot_types:
+ op = opcodes.OpInstanceReboot(instance_name=instance,
reboot_type=reboot_type,
ignore_secondaries=False)
Log("reboot with type '%s'", reboot_type, indent=2)
for instance in self.instances:
Log("instance %s", instance, indent=1)
op_start = self.StartInstanceOp(instance)
- op_act = opcodes.OpActivateInstanceDisks(instance_name=instance)
- op_deact = opcodes.OpDeactivateInstanceDisks(instance_name=instance)
+ op_act = opcodes.OpInstanceActivateDisks(instance_name=instance)
+ op_deact = opcodes.OpInstanceDeactivateDisks(instance_name=instance)
op_stop = self.StopInstanceOp(instance)
Log("activate disks when online", indent=2)
Log("activate disks when offline", indent=2)
Log("Adding and removing disks")
for instance in self.instances:
Log("instance %s", instance, indent=1)
- op_add = opcodes.OpSetInstanceParams(\
+ op_add = opcodes.OpInstanceSetParams(\
instance_name=instance,
disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
- op_rem = opcodes.OpSetInstanceParams(\
+ op_rem = opcodes.OpInstanceSetParams(\
instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
op_stop = self.StopInstanceOp(instance)
op_start = self.StartInstanceOp(instance)
Log("Adding and removing NICs")
for instance in self.instances:
Log("instance %s", instance, indent=1)
- op_add = opcodes.OpSetInstanceParams(\
+ op_add = opcodes.OpInstanceSetParams(\
instance_name=instance, nics=[(constants.DDM_ADD, {})])
- op_rem = opcodes.OpSetInstanceParams(\
+ op_rem = opcodes.OpInstanceSetParams(\
instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
Log("adding a NIC", indent=2)
Log("removing last NIC", indent=2)
"""Run confd queries for our instances.
The following confd queries are tested:
- - CONFD_REQ_PING: simple ping
- - CONFD_REQ_CLUSTER_MASTER: cluster master
- - CONFD_REQ_NODE_ROLE_BYNAME: node role, for the master
+ - CONFD_REQ_PING: simple ping
+ - CONFD_REQ_CLUSTER_MASTER: cluster master
+ - CONFD_REQ_NODE_ROLE_BYNAME: node role, for the master
"""
Log("Checking confd results")
self.BurnReplaceDisks2()
if (opts.disk_template in constants.DTS_GROWABLE and
- utils.any(self.disk_growth, lambda n: n > 0)):
+ compat.any(n > 0 for n in self.disk_growth)):
self.BurnGrowDisks()
if opts.do_failover and opts.disk_template in constants.DTS_NET_MIRROR:
self.BurnFailover()
- if opts.do_migrate and opts.disk_template == constants.DT_DRBD8:
- self.BurnMigrate()
+ if opts.do_migrate:
+ if opts.disk_template != constants.DT_DRBD8:
+ Log("Skipping migration (disk template not DRBD8)")
+ elif not self.hv_class.CAN_MIGRATE:
+ Log("Skipping migration (hypervisor %s does not support it)",
+ self.hypervisor)
+ else:
+ self.BurnMigrate()
if (opts.do_move and len(self.nodes) > 1 and
opts.disk_template in [constants.DT_PLAIN, constants.DT_FILE]):