#: Attribute holding field definitions
FIELDS = None
- def __init__(self, names, fields, use_locking):
+ def __init__(self, filter_, fields, use_locking):
"""Initializes this class.
"""
- self.names = names
self.use_locking = use_locking
- self.query = query.Query(self.FIELDS, fields)
+ self.query = query.Query(self.FIELDS, fields, filter_=filter_,
+ namefield="name")
self.requested_data = self.query.RequestedData()
+ self.names = self.query.RequestedNames()
+
+ # Sort only if no names were requested
+ self.sort_by_name = not self.names
self.do_locking = None
self.wanted = None
"""Collect data and execute query.
"""
- return query.GetQueryResponse(self.query, self._GetQueryData(lu))
+ return query.GetQueryResponse(self.query, self._GetQueryData(lu),
+ sort_by_name=self.sort_by_name)
def OldStyleQuery(self, lu):
"""Collect data and execute query.
"""
- return self.query.OldStyleQuery(self._GetQueryData(lu))
+ return self.query.OldStyleQuery(self._GetQueryData(lu),
+ sort_by_name=self.sort_by_name)
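# Editor's illustration (not part of the patch): qlang.MakeSimpleFilter is
# assumed to turn a name list into an OR of equality tests on the given
# name field, and to return None for an empty list -- which is what makes
# sort_by_name above default to True when no names were requested.  A
# hypothetical re-implementation:
def make_simple_filter(namefield, values):
  """Returns ["|", ["=", field, v], ...] or None for an empty value list."""
  if not values:
    return None
  return ["|"] + [["=", namefield, value] for value in values]

assert make_simple_filter("name", []) is None
assert make_simple_filter("name", ["node1"]) == ["|", ["=", "name", "node1"]]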
def _GetWantedNodes(lu, nodes):
# Special case for file storage
if storage_type == constants.ST_FILE:
# storage.FileStorage wants a list of storage directories
- return [[cfg.GetFileStorageDir()]]
+ return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
return []
@ivar instances: a list of running instances (runtime)
@ivar pinst: list of configured primary instances (config)
@ivar sinst: list of configured secondary instances (config)
- @ivar sbp: diction of {secondary-node: list of instances} of all peers
- of this node (config)
+ @ivar sbp: dictionary of {primary-node: list of instances} for all
+ instances for which this node is secondary (config)
@ivar mfree: free memory, as reported by hypervisor (runtime)
@ivar dfree: free disk, as reported by the node (runtime)
@ivar offline: the offline status (config)
instances it was primary for.
"""
+ cluster_info = self.cfg.GetClusterInfo()
for node, n_img in node_image.items():
# This code checks that every node which is now listed as a
# secondary has enough memory to host all instances for which
# it is a secondary
for prinode, instances in n_img.sbp.items():
needed_mem = 0
for instance in instances:
- bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
+ bep = cluster_info.FillBE(instance_cfg[instance])
if bep[constants.BE_AUTO_BALANCE]:
needed_mem += bep[constants.BE_MEMORY]
test = n_img.mfree < needed_mem
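# Editor's sketch of the data shapes involved in the check above: n_img.sbp
# maps each primary node to the instances that would fail over onto this
# node, and only auto-balanced instances count towards the needed memory.
# All names and numbers below are made up:
sbp = {"node1": ["inst1", "inst2"], "node2": ["inst3"]}
be_params = {"inst1": {"auto_balance": True, "memory": 512},
             "inst2": {"auto_balance": False, "memory": 2048},
             "inst3": {"auto_balance": True, "memory": 1024}}
mfree = 256
for prinode, instances in sbp.items():
  needed_mem = sum(be_params[i]["memory"] for i in instances
                   if be_params[i]["auto_balance"])
  if mfree < needed_mem:
    print("N+1 failure for primary %s: need %d MB, have %d MB"
          % (prinode, needed_mem, mfree))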
"""
REG_BGL = False
+ _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
def CheckPrereq(self):
"""Check prerequisites.
"""
self.nodes = []
+ self.master_node = self.cfg.GetMasterNode()
+
+ if self.op.node_names:
+ if self.op.command in self._SKIP_MASTER:
+ if self.master_node in self.op.node_names:
+ master_node_obj = self.cfg.GetNodeInfo(self.master_node)
+ master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
+
+ if master_oob_handler:
+ additional_text = ("Run '%s %s %s' if you want to operate on the"
+ " master regardless") % (master_oob_handler,
+ self.op.command,
+ self.master_node)
+ else:
+ additional_text = "The master node does not support out-of-band"
+
+ raise errors.OpPrereqError(("Operating on the master node %s is not"
+ " allowed for %s\n%s") %
+ (self.master_node, self.op.command,
+ additional_text), errors.ECODE_INVAL)
+ else:
+ self.op.node_names = self.cfg.GetNodeList()
+ if self.op.command in self._SKIP_MASTER:
+ self.op.node_names.remove(self.master_node)
+
+ if self.op.command in self._SKIP_MASTER:
+ assert self.master_node not in self.op.node_names
+
for node_name in self.op.node_names:
node = self.cfg.GetNodeInfo(node_name)
else:
self.nodes.append(node)
- if (self.op.command == constants.OOB_POWER_OFF and not node.offline):
+ if (not self.op.ignore_status and
+ (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
raise errors.OpPrereqError(("Cannot power off node %s because it is"
" not marked offline") % node_name,
errors.ECODE_STATE)
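# Editor's sketch of the behaviour enforced above, as a pure function
# (hypothetical helper, not in the patch): for power-off/power-cycle the
# master is refused when named explicitly and skipped silently when the
# node list is implicit:
def effective_oob_nodes(requested, all_nodes, master, skip_master):
  if requested:
    if skip_master and master in requested:
      raise ValueError("operating on the master node is not allowed")
    return list(requested)
  nodes = list(all_nodes)
  if skip_master:
    nodes.remove(master)
  return nodes

assert effective_oob_nodes([], ["master", "node1"], "master", True) == ["node1"]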
if self.op.node_names:
self.op.node_names = [_ExpandNodeName(self.cfg, name)
for name in self.op.node_names]
+ lock_names = self.op.node_names
else:
- self.op.node_names = self.cfg.GetNodeList()
+ lock_names = locking.ALL_SET
self.needed_locks = {
- locking.LEVEL_NODE: self.op.node_names,
+ locking.LEVEL_NODE: lock_names,
}
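# Editor's note, sketched below: declaring locking.ALL_SET means "lock
# every node that exists at acquire time", while an explicit list keeps
# the lock footprint minimal.  ALL_SET here is a stand-in sentinel, not
# the real locking module:
ALL_SET = object()

def resolve_locks(lock_names, existing_nodes):
  return list(existing_nodes) if lock_names is ALL_SET else list(lock_names)

assert resolve_locks(ALL_SET, ["node1", "node2"]) == ["node1", "node2"]
assert resolve_locks(["node1"], ["node1", "node2"]) == ["node1"]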
def Exec(self, feedback_fn):
"""Execute OOB and return result if we expect any.
"""
- master_node = self.cfg.GetMasterNode()
+ master_node = self.master_node
ret = []
for node in self.nodes:
REQ_BGL = False
def CheckArguments(self):
- self.nq = _NodeQuery(self.op.names, self.op.output_fields,
- self.op.use_locking)
+ self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.op.output_fields, self.op.use_locking)
def ExpandNames(self):
self.nq.ExpandNames(self)
def CheckArguments(self):
qcls = _GetQueryImplementation(self.op.what)
- names = qlang.ReadSimpleFilter("name", self.op.filter)
- self.impl = qcls(names, self.op.fields, False)
+ self.impl = qcls(self.op.filter, self.op.fields, False)
def ExpandNames(self):
self.impl.ExpandNames(self)
"volume_group_name": cluster.volume_group_name,
"drbd_usermode_helper": cluster.drbd_usermode_helper,
"file_storage_dir": cluster.file_storage_dir,
+ "shared_file_storage_dir": cluster.shared_file_storage_dir,
"maintain_node_health": cluster.maintain_node_health,
"ctime": cluster.ctime,
"mtime": cluster.mtime,
ignore_secondaries = self.op.ignore_secondaries
reboot_type = self.op.reboot_type
+ remote_info = self.rpc.call_instance_info(instance.primary_node,
+ instance.name,
+ instance.hypervisor)
+ remote_info.Raise("Error checking node %s" % instance.primary_node)
+ instance_running = bool(remote_info.payload)
+
node_current = instance.primary_node
- if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
- constants.INSTANCE_REBOOT_HARD]:
+ if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+ constants.INSTANCE_REBOOT_HARD]:
for disk in instance.disks:
self.cfg.SetDiskID(disk, node_current)
result = self.rpc.call_instance_reboot(node_current, instance,
self.op.shutdown_timeout)
result.Raise("Could not reboot instance")
else:
- result = self.rpc.call_instance_shutdown(node_current, instance,
- self.op.shutdown_timeout)
- result.Raise("Could not shutdown instance for full reboot")
- _ShutdownInstanceDisks(self, instance)
+ if instance_running:
+ result = self.rpc.call_instance_shutdown(node_current, instance,
+ self.op.shutdown_timeout)
+ result.Raise("Could not shutdown instance for full reboot")
+ _ShutdownInstanceDisks(self, instance)
+ else:
+ self.LogInfo("Instance %s was already stopped, starting now",
+ instance.name)
_StartInstanceDisks(self, instance, ignore_secondaries)
result = self.rpc.call_instance_start(node_current, instance, None, None)
msg = result.fail_msg
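# Editor's sketch of the branching above as a pure decision function (the
# real code issues RPCs instead of returning action lists; the type names
# are illustrative):
def plan_reboot(instance_running, reboot_type):
  if instance_running and reboot_type in ("soft", "hard"):
    return ["reboot"]
  actions = []
  if instance_running:
    actions += ["shutdown", "shutdown disks"]
  actions += ["start disks", "start"]
  return actions

assert plan_reboot(True, "soft") == ["reboot"]
assert plan_reboot(False, "hard") == ["start disks", "start"]
assert plan_reboot(True, "full") == ["shutdown", "shutdown disks",
                                     "start disks", "start"]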
hostname = netutils.GetHostname(name=new_name)
self.LogInfo("Resolved given name '%s' to '%s'", new_name,
hostname.name)
+ if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
+ raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
+ " same as given hostname '%s'") %
+ (hostname.name, self.op.new_name),
+ errors.ECODE_INVAL)
new_name = self.op.new_name = hostname.name
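# Editor's sketch of the assumed semantics of utils.MatchNameComponent: a
# short name matches a fully-qualified one if it equals the name or is a
# leading dot-separated component of it, so "inst1" accepts resolution to
# "inst1.example.com" while an unrelated name does not:
def match_name_component(key, names):
  for name in names:
    if name == key or name.startswith(key + "."):
      return name
  return None

assert match_name_component("inst1", ["inst1.example.com"])
assert not match_name_component("other", ["inst1.example.com"])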
if (self.op.ip_check and
netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
old_name = inst.name
rename_file_storage = False
- if (inst.disk_template == constants.DT_FILE and
+ if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
self.op.new_name != inst.name):
old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
rename_file_storage = True
REQ_BGL = False
def CheckArguments(self):
- self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
- self.op.use_locking)
+ self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.op.output_fields, self.op.use_locking)
def ExpandNames(self):
self.iq.ExpandNames(self)
disk_index)),
mode=disk["mode"])
disks.append(disk_dev)
+ elif template_name == constants.DT_SHARED_FILE:
+ if len(secondary_nodes) != 0:
+ raise errors.ProgrammerError("Wrong template configuration")
+
+ opcodes.RequireSharedFileStorage()
+
+ for idx, disk in enumerate(disk_info):
+ disk_index = idx + base_index
+ disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
+ iv_name="disk/%d" % disk_index,
+ logical_id=(file_driver,
+ "%s/disk%d" % (file_storage_dir,
+ disk_index)),
+ mode=disk["mode"])
+ disks.append(disk_dev)
+ elif template_name == constants.DT_BLOCK:
+ if len(secondary_nodes) != 0:
+ raise errors.ProgrammerError("Wrong template configuration")
+
+ for idx, disk in enumerate(disk_info):
+ disk_index = idx + base_index
+ disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
+ logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
+ disk["adopt"]),
+ iv_name="disk/%d" % disk_index,
+ mode=disk["mode"])
+ disks.append(disk_dev)
+
else:
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
return disks
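# Editor's illustration of the "disk/N" naming used above: indices continue
# from base_index, so disks generated in a later batch keep unique iv_names
# (values are made up):
base_index = 2
disk_info = [{"size": 1024, "mode": "rw"}, {"size": 2048, "mode": "ro"}]
iv_names = ["disk/%d" % (base_index + idx)
            for idx, _ in enumerate(disk_info)]
assert iv_names == ["disk/2", "disk/3"]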
"""
node = instance.primary_node
+
+ for device in instance.disks:
+ lu.cfg.SetDiskID(device, node)
+
logging.info("Pause sync of instance %s disks", instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
try:
for idx, device in enumerate(instance.disks):
lu.LogInfo("* Wiping disk %d", idx)
- logging.info("Wiping disk %d for instance %s", idx, instance.name)
+ logging.info("Wiping disk %d for instance %s, node %s",
+ idx, instance.name, node)
# The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
# MAX_WIPE_CHUNK at max
pnode = target_node
all_nodes = [pnode]
- if instance.disk_template == constants.DT_FILE:
+ if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
# 128 MB are added for drbd metadata for each disk
constants.DT_DRBD8: _compute(disks, 128),
constants.DT_FILE: {},
+ constants.DT_SHARED_FILE: {},
}
if disk_template not in req_size_dict:
# 128 MB are added for drbd metadata for each disk
constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
constants.DT_FILE: None,
+ constants.DT_SHARED_FILE: 0,
+ constants.DT_BLOCK: 0,
}
if disk_template not in req_size_dict:
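# Editor's example of the size computation above: DRBD reserves 128 MB of
# metadata per disk, while file-based templates are not checked against a
# volume group (hence the None/0/{} entries).  Made-up sizes:
disks = [{"size": 1024}, {"size": 2048}]
assert sum(d["size"] + 128 for d in disks) == 3328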
if self.op.mode == constants.INSTANCE_IMPORT:
raise errors.OpPrereqError("Disk adoption not allowed for"
" instance import", errors.ECODE_INVAL)
+ else:
+ if self.op.disk_template in constants.DTS_MUST_ADOPT:
+ raise errors.OpPrereqError("Disk template %s requires disk adoption,"
+ " but no 'adopt' parameter given" %
+ self.op.disk_template,
+ errors.ECODE_INVAL)
self.adopt_disks = has_adopt
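# Editor's sketch of the adoption rules enforced in this area (the
# all-or-nothing rule is presumably checked nearby; this helper is
# hypothetical):
def check_adoption(disks, is_import, must_adopt):
  adopting = ["adopt" in d for d in disks]
  if any(adopting) and not all(adopting):
    raise ValueError("either all disks are adopted or none is")
  if any(adopting) and is_import:
    raise ValueError("disk adoption not allowed for instance import")
  if not any(adopting) and must_adopt:
    raise ValueError("this disk template requires disk adoption")
  return any(adopting)

assert check_adoption([{"adopt": "lv0"}], False, True) is True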
" in cluster" % mac,
errors.ECODE_NOTUNIQUE)
- # bridge verification
- bridge = nic.get("bridge", None)
- link = nic.get("link", None)
- if bridge and link:
- raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
- " at the same time", errors.ECODE_INVAL)
- elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
- raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
- errors.ECODE_INVAL)
- elif bridge:
- link = bridge
-
+ # Build nic parameters
+ link = nic.get(constants.INIC_LINK, None)
nicparams = {}
if nic_mode_req:
nicparams[constants.NIC_MODE] = nic_mode_req
req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
_CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
- else: # instead, we must check the adoption data
+ elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
if len(all_lvs) != len(self.disks):
raise errors.OpPrereqError("Duplicate volume names given for adoption",
for dsk in self.disks:
dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
+ elif self.op.disk_template == constants.DT_BLOCK:
+ # Normalize and de-duplicate device paths
+ all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks])
+ if len(all_disks) != len(self.disks):
+ raise errors.OpPrereqError("Duplicate disk names given for adoption",
+ errors.ECODE_INVAL)
+ baddisks = [d for d in all_disks
+ if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
+ if baddisks:
+ raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
+ " cannot be adopted" %
+ (", ".join(baddisks),
+ constants.ADOPTABLE_BLOCKDEV_ROOT),
+ errors.ECODE_INVAL)
+
+ node_disks = self.rpc.call_bdev_sizes([pnode.name],
+ list(all_disks))[pnode.name]
+ node_disks.Raise("Cannot get block device information from node %s" %
+ pnode.name)
+ node_disks = node_disks.payload
+ delta = all_disks.difference(node_disks.keys())
+ if delta:
+ raise errors.OpPrereqError("Missing block device(s): %s" %
+ utils.CommaJoin(delta),
+ errors.ECODE_INVAL)
+ for dsk in self.disks:
+ dsk["size"] = int(float(node_disks[dsk["adopt"]]))
+
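# Editor's demonstration of why the paths are passed through
# os.path.abspath above: normalisation collapses duplicate separators and
# "." components, so two spellings of one device become a single set entry,
# and the prefix test then rejects anything outside the adoptable root
# (assumed here to be "/dev/disk/"):
import os
paths = ["/dev/disk/sdb1", "/dev//disk/./sdb1"]
normalized = set(os.path.abspath(p) for p in paths)
assert len(normalized) == 1
assert all(p.startswith("/dev/disk/") for p in normalized)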
_CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
_CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
else:
network_port = None
- if constants.ENABLE_FILE_STORAGE:
+ if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
# this is needed because os.path.join does not accept None arguments
if self.op.file_storage_dir is None:
string_file_storage_dir = ""
string_file_storage_dir = self.op.file_storage_dir
# build the full file storage dir path
- file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
+ if self.op.disk_template == constants.DT_SHARED_FILE:
+ get_fsd_fn = self.cfg.GetSharedFileStorageDir
+ else:
+ get_fsd_fn = self.cfg.GetFileStorageDir
+
+ file_storage_dir = utils.PathJoin(get_fsd_fn(),
string_file_storage_dir, instance)
else:
file_storage_dir = ""
)
if self.adopt_disks:
- # rename LVs to the newly-generated names; we need to construct
- # 'fake' LV disks with the old data, plus the new unique_id
- tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
- rename_to = []
- for t_dsk, a_dsk in zip (tmp_disks, self.disks):
- rename_to.append(t_dsk.logical_id)
- t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
- self.cfg.SetDiskID(t_dsk, pnode_name)
- result = self.rpc.call_blockdev_rename(pnode_name,
- zip(tmp_disks, rename_to))
- result.Raise("Failed to rename adoped LVs")
+ if self.op.disk_template == constants.DT_PLAIN:
+ # rename LVs to the newly-generated names; we need to construct
+ # 'fake' LV disks with the old data, plus the new unique_id
+ tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
+ rename_to = []
+ for t_dsk, a_dsk in zip(tmp_disks, self.disks):
+ rename_to.append(t_dsk.logical_id)
+ t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
+ self.cfg.SetDiskID(t_dsk, pnode_name)
+ result = self.rpc.call_blockdev_rename(pnode_name,
+ zip(tmp_disks, rename_to))
+ result.Raise("Failed to rename adoped LVs")
else:
feedback_fn("* creating instance disks...")
try:
if instance.name not in node_insts.payload:
if instance.admin_up:
- state = "ERROR_down"
+ state = constants.INSTST_ERRORDOWN
else:
- state = "ADMIN_down"
+ state = constants.INSTST_ADMINDOWN
raise errors.OpExecError("Instance %s is not running (state %s)" %
(instance.name, state))
self.disk = instance.FindDisk(self.op.disk)
- if instance.disk_template != constants.DT_FILE:
- # TODO: check the free disk space for file, when that feature
- # will be supported
+ if instance.disk_template not in (constants.DT_FILE,
+ constants.DT_SHARED_FILE):
+ # TODO: check the free disk space for file, when that feature will be
+ # supported
_CheckNodesFreeDiskPerVG(self, nodenames,
self.disk.ComputeGrowth(self.op.amount))
result.append(("disk/%d" % device_idx, "remove"))
elif disk_op == constants.DDM_ADD:
# add a new disk
- if instance.disk_template == constants.DT_FILE:
+ if instance.disk_template in (constants.DT_FILE,
+ constants.DT_SHARED_FILE):
file_driver, file_path = instance.disks[0].logical_id
file_path = os.path.dirname(file_path)
else:
class _GroupQuery(_QueryBase):
-
FIELDS = query.GROUP_FIELDS
def ExpandNames(self, lu):
REQ_BGL = False
def CheckArguments(self):
- self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
+ self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.op.output_fields, False)
def ExpandNames(self):
self.gq.ExpandNames(self)
def ExpandNames(self):
# This raises errors.OpPrereqError on its own:
- self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)
+ self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
self.needed_locks = {
locking.LEVEL_NODEGROUP: [self.group_uuid],
def CheckPrereq(self):
"""Check prerequisites.
- This checks that the given old_name exists as a node group, and that
- new_name doesn't.
+ Ensures the requested new name is not yet used.
"""
try:
"""
env = {
- "OLD_NAME": self.op.old_name,
+ "OLD_NAME": self.op.group_name,
"NEW_NAME": self.op.new_name,
}
if group is None:
raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
- (self.op.old_name, self.group_uuid))
+ (self.op.group_name, self.group_uuid))
group.name = self.op.new_name
self.cfg.Update(group, feedback_fn)