"""
HPATH = "cluster-init"
HTYPE = constants.HTYPE_CLUSTER
- _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
+ _OP_REQP = ["cluster_name", "hypervisor_type", "mac_prefix",
"def_bridge", "master_netdev", "file_storage_dir"]
REQ_CLUSTER = False
secondary_ip)
self.secondary_ip = secondary_ip
- # checks presence of the volume group given
- vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
-
- if vgstatus:
- raise errors.OpPrereqError("Error: %s" % vgstatus)
+ if not hasattr(self.op, "vg_name"):
+ self.op.vg_name = None
+ # if vg_name not None, checks if volume group is valid
+ if self.op.vg_name:
+ vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
+ if vgstatus:
+ raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
+ " you are not using lvm" % vgstatus)
self.op.file_storage_dir = os.path.normpath(self.op.file_storage_dir)
# compares ganeti version
local_version = constants.PROTOCOL_VERSION
if not remote_version:
- feedback_fn(" - ERROR: connection to %s failed" % (node))
+ feedback_fn(" - ERROR: connection to %s failed" % (node))
return True
if local_version != remote_version:
" please restart manually.")
+def _RecursiveCheckIfLVMBased(disk):
+  """Check if the given disk or its children are lvm-based.
+
+  Args:
+    disk: ganeti.objects.Disk object
+
+  Returns:
+    boolean indicating whether a LD_LV dev_type was found or not
+
+  """
+  # Walk the children first: a disk counts as lvm-based if ANY device
+  # anywhere in its tree is an LD_LV logical volume.
+  if disk.children:
+    for chdisk in disk.children:
+      if _RecursiveCheckIfLVMBased(chdisk):
+        return True
+  # Leaf (or no lvm child found): decide by this device's own type.
+  return disk.dev_type == constants.LD_LV
+
+
+class LUSetClusterParams(LogicalUnit):
+  """Change the parameters of the cluster.
+
+  Currently only the volume group name (op.vg_name) can be changed;
+  setting it to a false value disables lvm-based storage cluster-wide.
+
+  """
+  HPATH = "cluster-modify"
+  HTYPE = constants.HTYPE_CLUSTER
+  _OP_REQP = []
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    Returns the hook environment plus the node lists; hooks run only
+    on the master node (pre and post).
+
+    """
+    env = {
+      "OP_TARGET": self.sstore.GetClusterName(),
+      "NEW_VG_NAME": self.op.vg_name,
+      }
+    mn = self.sstore.GetMasterNode()
+    return env, [mn], [mn]
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the given params don't conflict and
+    if the given volume group is valid.
+
+    """
+    # Disabling lvm storage (vg_name evaluating false) is only allowed
+    # when no existing instance still has an lvm-based disk anywhere in
+    # its disk tree.
+    if not self.op.vg_name:
+      instances = [self.cfg.GetInstanceInfo(name)
+                   for name in self.cfg.GetInstanceList()]
+      for inst in instances:
+        for disk in inst.disks:
+          if _RecursiveCheckIfLVMBased(disk):
+            raise errors.OpPrereqError("Cannot disable lvm storage while"
+                                       " lvm-based instances exist")
+
+    # if vg_name not None, checks given volume group on all nodes
+    if self.op.vg_name:
+      node_list = self.cfg.GetNodeList()
+      vglist = rpc.call_vg_list(node_list)
+      for node in node_list:
+        vgstatus = _HasValidVG(vglist[node], self.op.vg_name)
+        if vgstatus:
+          raise errors.OpPrereqError("Error on node '%s': %s" %
+                                     (node, vgstatus))
+
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    Only writes the new vg_name to the configuration when it actually
+    differs from the current value.
+
+    """
+    if self.op.vg_name != self.cfg.GetVGName():
+      self.cfg.SetVGName(self.op.vg_name)
+    else:
+      feedback_fn("Cluster LVM configuration already in desired"
+                  " state, not changing")
+
+
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
"""Sleep and poll for an instance's disk to sync.
if on_primary or dev.AssembleOnSecondary():
rstats = rpc.call_blockdev_find(node, dev)
if not rstats:
- logger.ToStderr("Can't get any data from node %s" % node)
+ logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
result = False
else:
result = result and (not rstats[idx])
"""Run a command on some nodes.
"""
+ # put the master at the end of the nodes list
+ master_node = self.sstore.GetMasterNode()
+ if master_node in self.nodes:
+ self.nodes.remove(master_node)
+ self.nodes.append(master_node)
+
data = []
for node in self.nodes:
result = self.ssh.Run(node, "root", self.op.command)
force = self.op.force
extra_args = getattr(self.op, "extra_args", "")
+ self.cfg.MarkInstanceUp(instance.name)
+
node_current = instance.primary_node
_StartInstanceDisks(self.cfg, instance, force)
_ShutdownInstanceDisks(instance, self.cfg)
raise errors.OpExecError("Could not start instance")
- self.cfg.MarkInstanceUp(instance.name)
-
class LURebootInstance(LogicalUnit):
"""Reboot an instance.
"""
instance = self.instance
node_current = instance.primary_node
+ self.cfg.MarkInstanceDown(instance.name)
if not rpc.call_instance_shutdown(node_current, instance):
logger.Error("could not shutdown instance")
- self.cfg.MarkInstanceDown(instance.name)
_ShutdownInstanceDisks(instance, self.cfg)
inst = self.instance
old_name = inst.name
+ if inst.disk_template == constants.DT_FILE:
+ old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
+
self.cfg.RenameInstance(inst.name, self.op.new_name)
# re-read the instance from the configuration after rename
inst = self.cfg.GetInstanceInfo(self.op.new_name)
+ if inst.disk_template == constants.DT_FILE:
+ new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
+ result = rpc.call_file_storage_dir_rename(inst.primary_node,
+ old_file_storage_dir,
+ new_file_storage_dir)
+
+ if not result:
+ raise errors.OpExecError("Could not connect to node '%s' to rename"
+ " directory '%s' to '%s' (but the instance"
+ " has been renamed in Ganeti)" % (
+ inst.primary_node, old_file_storage_dir,
+ new_file_storage_dir))
+
+ if not result[0]:
+ raise errors.OpExecError("Could not rename directory '%s' to '%s'"
+ " (but the instance has been renamed in"
+ " Ganeti)" % (old_file_storage_dir,
+ new_file_storage_dir))
+
_StartInstanceDisks(self.cfg, inst, None)
try:
if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
for dev in instance.disks:
# for remote_raid1, these are md over drbd
if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
- if not self.op.ignore_consistency:
+ if instance.status == "up" and not self.op.ignore_consistency:
raise errors.OpExecError("Disk %s is degraded on target node,"
" aborting failover." % dev.iv_name)
# distribute new instance config to the other nodes
self.cfg.AddInstance(instance)
- feedback_fn("* activating the instance's disks on target node")
- logger.Info("Starting instance %s on node %s" %
- (instance.name, target_node))
+ # Only start the instance if it's marked as up
+ if instance.status == "up":
+ feedback_fn("* activating the instance's disks on target node")
+ logger.Info("Starting instance %s on node %s" %
+ (instance.name, target_node))
- disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
- ignore_secondaries=True)
- if not disks_ok:
- _ShutdownInstanceDisks(instance, self.cfg)
- raise errors.OpExecError("Can't activate the instance's disks")
+ disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
+ ignore_secondaries=True)
+ if not disks_ok:
+ _ShutdownInstanceDisks(instance, self.cfg)
+ raise errors.OpExecError("Can't activate the instance's disks")
- feedback_fn("* starting the instance on the target node")
- if not rpc.call_instance_start(target_node, instance, None):
- _ShutdownInstanceDisks(instance, self.cfg)
- raise errors.OpExecError("Could not start instance %s on node %s." %
- (instance.name, target_node))
+ feedback_fn("* starting the instance on the target node")
+ if not rpc.call_instance_start(target_node, instance, None):
+ _ShutdownInstanceDisks(instance, self.cfg)
+ raise errors.OpExecError("Could not start instance %s on node %s." %
+ (instance.name, target_node))
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
def _GenerateDiskTemplate(cfg, template_name,
instance_name, primary_node,
- secondary_nodes, disk_sz, swap_sz):
+ secondary_nodes, disk_sz, swap_sz,
+ file_storage_dir, file_driver):
"""Generate the entire disk layout for a given template type.
"""
logical_id=(vgname, names[1]),
iv_name = "sdb")
disks = [sda_dev, sdb_dev]
- elif template_name == constants.DT_LOCAL_RAID1:
- if len(secondary_nodes) != 0:
- raise errors.ProgrammerError("Wrong template configuration")
-
-
- names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
- ".sdb_m1", ".sdb_m2"])
- sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
- logical_id=(vgname, names[0]))
- sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
- logical_id=(vgname, names[1]))
- md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
- size=disk_sz,
- children = [sda_dev_m1, sda_dev_m2])
- sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
- logical_id=(vgname, names[2]))
- sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
- logical_id=(vgname, names[3]))
- md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
- size=swap_sz,
- children = [sdb_dev_m1, sdb_dev_m2])
- disks = [md_sda_dev, md_sdb_dev]
- elif template_name == constants.DT_REMOTE_RAID1:
- if len(secondary_nodes) != 1:
- raise errors.ProgrammerError("Wrong template configuration")
- remote_node = secondary_nodes[0]
- names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
- ".sdb_data", ".sdb_meta"])
- drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
- disk_sz, names[0:2])
- md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
- children = [drbd_sda_dev], size=disk_sz)
- drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
- swap_sz, names[2:4])
- md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
- children = [drbd_sdb_dev], size=swap_sz)
- disks = [md_sda_dev, md_sdb_dev]
elif template_name == constants.DT_DRBD8:
if len(secondary_nodes) != 1:
raise errors.ProgrammerError("Wrong template configuration")
drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
swap_sz, names[2:4], "sdb")
disks = [drbd_sda_dev, drbd_sdb_dev]
+ elif template_name == constants.DT_FILE:
+ if len(secondary_nodes) != 0:
+ raise errors.ProgrammerError("Wrong template configuration")
+
+ file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
+ iv_name="sda", logical_id=(file_driver,
+ "%s/sda" % file_storage_dir))
+ file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
+ iv_name="sdb", logical_id=(file_driver,
+ "%s/sdb" % file_storage_dir))
+ disks = [file_sda_dev, file_sdb_dev]
else:
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
return disks
"""
info = _GetInstanceInfoText(instance)
+ if instance.disk_template == constants.DT_FILE:
+ file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+ result = rpc.call_file_storage_dir_create(instance.primary_node,
+ file_storage_dir)
+
+ if not result:
+ logger.Error("Could not connect to node '%s'" % inst.primary_node)
+ return False
+
+ if not result[0]:
+ logger.Error("failed to create directory '%s'" % file_storage_dir)
+ return False
+
for device in instance.disks:
logger.Info("creating volume %s for instance %s" %
- (device.iv_name, instance.name))
+ (device.iv_name, instance.name))
#HARDCODE
for secondary_node in instance.secondary_nodes:
if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
logger.Error("failed to create volume %s on primary!" %
device.iv_name)
return False
+
return True
" continuing anyway" %
(device.iv_name, node))
result = False
+
+ if instance.disk_template == constants.DT_FILE:
+ file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+ if not rpc.call_file_storage_dir_remove(instance.primary_node,
+ file_storage_dir):
+ logger.Error("could not remove directory '%s'" % file_storage_dir)
+ result = False
+
return result
raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
self.op.mode)
+ if (not self.cfg.GetVGName() and
+ self.op.disk_template not in constants.DTS_NOT_LVM):
+ raise errors.OpPrereqError("Cluster does not support lvm-based"
+ " instances")
+
if self.op.mode == constants.INSTANCE_IMPORT:
src_node = getattr(self.op, "src_node", None)
src_path = getattr(self.op, "src_path", None)
if self.op.disk_template not in constants.DISK_TEMPLATES:
raise errors.OpPrereqError("Invalid disk template name")
+ if (self.op.file_driver and
+ not self.op.file_driver in constants.FILE_DRIVER):
+ raise errors.OpPrereqError("Invalid file driver name '%s'" %
+ self.op.file_driver)
+
+ if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
+ raise errors.OpPrereqError("File storage directory not a relative"
+ " path")
+
if self.op.disk_template in constants.DTS_NET_MIRROR:
if getattr(self.op, "snode", None) is None:
raise errors.OpPrereqError("The networked disk templates need"
req_size_dict = {
constants.DT_DISKLESS: None,
constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
- constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
# 256 MB are added for drbd metadata, 128MB for each drbd device
- constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
+ constants.DT_FILE: None,
}
if self.op.disk_template not in req_size_dict:
else:
network_port = None
+ # this is needed because os.path.join does not accept None arguments
+ if self.op.file_storage_dir is None:
+ string_file_storage_dir = ""
+ else:
+ string_file_storage_dir = self.op.file_storage_dir
+
+ # build the full file storage dir path
+ file_storage_dir = os.path.normpath(os.path.join(
+ self.sstore.GetFileStorageDir(),
+ string_file_storage_dir, instance))
+
+
disks = _GenerateDiskTemplate(self.cfg,
self.op.disk_template,
instance, pnode_name,
self.secondaries, self.op.disk_size,
- self.op.swap_size)
+ self.op.swap_size,
+ file_storage_dir,
+ self.op.file_driver)
iobj = objects.Instance(name=instance, os=self.op.os_type,
primary_node=pnode_name,
console_cmd = hyper.GetShellCommandForConsole(instance)
# build ssh cmdline
- cmd = self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
- return cmd[0], cmd
+ return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
class LUReplaceDisks(LogicalUnit):
return result
-class LUSetInstanceParms(LogicalUnit):
+class LUSetInstanceParams(LogicalUnit):
"""Modifies an instances's parameters.
"""
self.kernel_path = getattr(self.op, "kernel_path", None)
self.initrd_path = getattr(self.op, "initrd_path", None)
self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
- all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
- self.kernel_path, self.initrd_path, self.hvm_boot_order]
- if all_parms.count(None) == len(all_parms):
+ all_params = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
+ self.kernel_path, self.initrd_path, self.hvm_boot_order]
+ if all_params.count(None) == len(all_params):
raise errors.OpPrereqError("No changes submitted")
if self.mem is not None:
try:
instance = self.instance
dst_node = self.dst_node
src_node = instance.primary_node
- # shutdown the instance, unless requested not to do so
if self.op.shutdown:
- op = opcodes.OpShutdownInstance(instance_name=instance.name)
- self.proc.ChainOpCode(op)
+ # shutdown the instance, but not the disks
+ if not rpc.call_instance_shutdown(src_node, instance):
+ raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+ (instance.name, source_node))
vgname = self.cfg.GetVGName()
snap_disks.append(new_dev)
finally:
- if self.op.shutdown:
- op = opcodes.OpStartupInstance(instance_name=instance.name,
- force=False)
- self.proc.ChainOpCode(op)
+ if self.op.shutdown and instance.status == "up":
+ if not rpc.call_instance_start(src_node, instance, None):
+ _ShutdownInstanceDisks(instance, self.cfg)
+ raise errors.OpExecError("Could not start instance")
# TODO: check for size
for dev in snap_disks:
- if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
- instance):
- logger.Error("could not export block device %s from node"
- " %s to node %s" %
- (dev.logical_id[1], src_node, dst_node.name))
+ if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
+ logger.Error("could not export block device %s from node %s to node %s"
+ % (dev.logical_id[1], src_node, dst_node.name))
if not rpc.call_blockdev_remove(src_node, dev):
- logger.Error("could not remove snapshot block device %s from"
- " node %s" % (dev.logical_id[1], src_node))
+ logger.Error("could not remove snapshot block device %s from node %s" %
+ (dev.logical_id[1], src_node))
if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
logger.Error("could not finalize export for instance %s on node %s" %