import os.path
import time
import re
-import platform
import logging
import copy
import OpenSSL
from ganeti import opcodes
from ganeti import ht
from ganeti import rpc
+from ganeti import runtime
import ganeti.masterd.instance # pylint: disable=W0611
DRBD_META_SIZE = 128
# States of instance
-INSTANCE_UP = [constants.ADMINST_UP]
INSTANCE_DOWN = [constants.ADMINST_DOWN]
-INSTANCE_OFFLINE = [constants.ADMINST_OFFLINE]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
+#: Instance statuses in which an instance can be marked as offline/online
+CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
+ constants.ADMINST_OFFLINE,
+ ]))
+
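# Illustrative sketch, not part of the original change: a hypothetical guard
# showing the intended use of CAN_CHANGE_INSTANCE_OFFLINE. The helper name
# and error message are made up; only the membership test mirrors the
# definition above.
def _ExampleCheckCanChangeOffline(instance):
  """Hypothetical sketch: refuse to toggle the offline flag when not allowed.

  """
  if instance.admin_state not in CAN_CHANGE_INSTANCE_OFFLINE:
    raise errors.OpPrereqError("Instance has to be stopped or already marked"
                               " offline to change its offline state",
                               errors.ECODE_STATE)
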
class ResultWithJobs:
"""Data container for LU results with jobs.
Instances of this class returned from L{LogicalUnit.Exec} will be recognized
- by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
+ by L{mcpu._ProcessResult}. The latter will then submit the jobs
contained in the C{jobs} attribute and include the job IDs in the opcode
result.
as values. Rules:
- use an empty dict if you don't need any lock
- - if you don't need any lock at a particular level omit that level
+ - if you don't need any lock at a particular level omit that
+ level (note that in this case C{DeclareLocks} won't be called
+ at all for that level)
+ - if you need locks at a level, but you can't calculate it in
+ this function, initialise that level with an empty list and do
+ further processing in L{LogicalUnit.DeclareLocks} (see that
+ function's docstring)
- don't put anything for the BGL level
- - if you want all locks at a level use locking.ALL_SET as a value
+ - if you want all locks at a level use L{locking.ALL_SET} as a value
If you need to share locks (rather than acquire them exclusively) at one
level, you can modify self.share_locks, setting a true value (usually 1)
for that level.
@param level: Locking level which is going to be locked
- @type level: member of ganeti.locking.LEVELS
+ @type level: member of L{ganeti.locking.LEVELS}
"""
#: Attribute holding field definitions
FIELDS = None
+ #: Field to sort by
+ SORT_FIELD = "name"
+
def __init__(self, qfilter, fields, use_locking):
"""Initializes this class.
self.use_locking = use_locking
self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
- namefield="name")
+ namefield=self.SORT_FIELD)
self.requested_data = self.query.RequestedData()
self.names = self.query.RequestedNames()
})
+def _AnnotateDiskParams(instance, devs, cfg):
+ """Little helper wrapper to the rpc annotation method.
+
+ @param instance: The instance object
+ @type devs: List of L{objects.Disk}
+ @param devs: The root devices (not any of their children!)
+ @param cfg: The config object
+ @return: The annotated disk copies
+ @see L{rpc.AnnotateDiskParams}
+
+ """
+ return rpc.AnnotateDiskParams(instance.disk_template, devs,
+ cfg.GetInstanceDiskParams(instance))
+
+
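# Illustrative sketch, not part of the original change: the typical call
# pattern for _AnnotateDiskParams. The helper below is hypothetical; it just
# shows that callers pass root disks and get back annotated copies, leaving
# the configuration objects untouched.
def _ExampleAnnotatedRootDisks(lu, instance):
  """Hypothetical helper: annotated copies of all root disks of an instance.

  """
  return _AnnotateDiskParams(instance, instance.disks, lu.cfg)
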
+def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
+ cur_group_uuid):
+ """Checks if node groups for locked instances are still correct.
+
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: Cluster configuration
+ @type instances: dict; string as key, L{objects.Instance} as value
+ @param instances: Dictionary, instance name as key, instance object as value
+ @type owned_groups: iterable of string
+ @param owned_groups: List of owned groups
+ @type owned_nodes: iterable of string
+ @param owned_nodes: List of owned nodes
+ @type cur_group_uuid: string or None
+ @param cur_group_uuid: Optional group UUID to check against instance's groups
+
+ """
+ for (name, inst) in instances.items():
+ assert owned_nodes.issuperset(inst.all_nodes), \
+ "Instance %s's nodes changed while we kept the lock" % name
+
+ inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
+
+ assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
+ "Instance %s has no node in group %s" % (name, cur_group_uuid)
+
+
def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
"""Checks if the owned node groups are still correct for an instance.
return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
+def _CopyLockList(names):
+ """Makes a copy of a list of lock names.
+
+ Handles L{locking.ALL_SET} correctly.
+
+ """
+ if names == locking.ALL_SET:
+ return locking.ALL_SET
+ else:
+ return names[:]
+
+
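# Illustrative note, not part of the original change: why plain slicing is
# not enough when copying lock lists. locking.ALL_SET is a sentinel rather
# than a plain list, so a blind "names[:]" copy cannot handle it, while
# aliasing the same list would let later LEVEL_NODE_RES adjustments leak
# into the LEVEL_NODE locks. A typical DeclareLocks body (mirroring several
# LUs further below) therefore reads:
#
#   elif level == locking.LEVEL_NODE_RES:
#     # Copy node locks
#     self.needed_locks[locking.LEVEL_NODE_RES] = \
#       _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
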
def _GetWantedNodes(lu, nodes):
"""Returns list of checked and expanded node names.
use_none=use_none,
use_default=use_default)
else:
- if not value or value == [constants.VALUE_DEFAULT]:
+ if (not value or value == [constants.VALUE_DEFAULT] or
+ value == constants.VALUE_DEFAULT):
if group_policy:
del ipolicy[key]
else:
# in a nicer way
ipolicy[key] = list(value)
try:
- objects.InstancePolicy.CheckParameterSyntax(ipolicy)
+ objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
except errors.ConfigurationError, err:
raise errors.OpPrereqError("Invalid instance policy: %s" % err,
errors.ECODE_INVAL)
hm = lu.proc.BuildHooksManager(lu)
try:
hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
- except:
- # pylint: disable=W0702
- lu.LogWarning("Errors occurred running hooks on %s" % node_name)
+ except Exception, err: # pylint: disable=W0703
+ lu.LogWarning("Errors occurred running hooks on %s: %s" % (node_name, err))
def _CheckOutputFields(static, dynamic, selected):
(instance.name, msg), errors.ECODE_STATE)
-def _ComputeMinMaxSpec(name, ipolicy, value):
+def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
"""Computes if value is in the desired range.
@param name: name of the parameter for which we perform the check
+ @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
+ not just 'disk')
@param ipolicy: dictionary containing min, max and std values
@param value: actual value that we want to use
@return: None or element not meeting the criteria
max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
if value > max_v or min_v > value:
+ if qualifier:
+ fqn = "%s/%s" % (name, qualifier)
+ else:
+ fqn = name
return ("%s value %s is not in range [%s, %s]" %
- (name, value, min_v, max_v))
+ (fqn, value, min_v, max_v))
return None
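# Illustrative worked example, not part of the original change (the ipolicy
# limits and values below are made up): with the new qualifier argument a
# violation on the second disk is reported with its index, e.g.
#
#   _ComputeMinMaxSpec(constants.ISPEC_DISK_SIZE, "1", ipolicy, 2048)
#
# returns a message of the form "<name>/1 value 2048 is not in range
# [min, max]" instead of the previous, ambiguous "<name> value 2048 ...".
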
def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
- nic_count, disk_sizes,
+ nic_count, disk_sizes, spindle_use,
_compute_fn=_ComputeMinMaxSpec):
"""Verifies ipolicy against provided specs.
@param nic_count: Number of nics used
@type disk_sizes: list of ints
@param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
+ @type spindle_use: int
+ @param spindle_use: The number of spindles this instance uses
@param _compute_fn: The compute function (unittest only)
@return: A list of violations, or an empty list if no violations are found
assert disk_count == len(disk_sizes)
test_settings = [
- (constants.ISPEC_MEM_SIZE, mem_size),
- (constants.ISPEC_CPU_COUNT, cpu_count),
- (constants.ISPEC_DISK_COUNT, disk_count),
- (constants.ISPEC_NIC_COUNT, nic_count),
- ] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes)
+ (constants.ISPEC_MEM_SIZE, "", mem_size),
+ (constants.ISPEC_CPU_COUNT, "", cpu_count),
+ (constants.ISPEC_DISK_COUNT, "", disk_count),
+ (constants.ISPEC_NIC_COUNT, "", nic_count),
+ (constants.ISPEC_SPINDLE_USE, "", spindle_use),
+ ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
+ for idx, d in enumerate(disk_sizes)]
return filter(None,
- (_compute_fn(name, ipolicy, value)
- for (name, value) in test_settings))
+ (_compute_fn(name, qualifier, ipolicy, value)
+ for (name, qualifier, value) in test_settings))
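# Illustrative note, not part of the original change: for an instance with
# two disks, test_settings above expands to the four global specs, the
# spindle-use spec, plus one qualified entry per disk, roughly:
#
#   [(ISPEC_MEM_SIZE, "", mem), (ISPEC_CPU_COUNT, "", cpus),
#    (ISPEC_DISK_COUNT, "", 2), (ISPEC_NIC_COUNT, "", nics),
#    (ISPEC_SPINDLE_USE, "", use),
#    (ISPEC_DISK_SIZE, "0", size0), (ISPEC_DISK_SIZE, "1", size1)]
#
# so every disk size is checked individually and reported with its index.
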
def _ComputeIPolicyInstanceViolation(ipolicy, instance,
"""
mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
+ spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
disk_count = len(instance.disks)
disk_sizes = [disk.size for disk in instance.disks]
nic_count = len(instance.nics)
return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
- disk_sizes)
+ disk_sizes, spindle_use)
def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
+ spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
- disk_sizes)
+ disk_sizes, spindle_use)
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
@param old_ipolicy: The current (still in-place) ipolicy
@param new_ipolicy: The new (to become) ipolicy
@param instances: List of instances to verify
- @return: A list of instances which violates the new ipolicy but did not before
+ @return: A list of instances which violate the new ipolicy but
+ did not before
"""
- return (_ComputeViolatingInstances(old_ipolicy, instances) -
- _ComputeViolatingInstances(new_ipolicy, instances))
+ return (_ComputeViolatingInstances(new_ipolicy, instances) -
+ _ComputeViolatingInstances(old_ipolicy, instances))
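# Illustrative worked example, not part of the original change (instance
# names are made up): the order of the set difference above matters. If
#
#   _ComputeViolatingInstances(new_ipolicy, instances) == set(["a", "b"])
#   _ComputeViolatingInstances(old_ipolicy, instances) == set(["b"])
#
# then the result is set(["a"]): exactly the instances that only start
# violating the policy with the change, as the docstring promises.
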
def _ExpandItemName(fn, name, kind):
for dev in instance.disks:
cfg.SetDiskID(dev, node_name)
- result = rpc_runner.call_blockdev_getmirrorstatus(node_name, instance.disks)
+ result = rpc_runner.call_blockdev_getmirrorstatus(node_name, (instance.disks,
+ instance))
result.Raise("Failed to get disk status from node %s" % node_name,
prereq=prereq, ecode=errors.ECODE_ENVIRON)
"""Verifies the cluster config.
"""
- REQ_BGL = True
+ REQ_BGL = False
def _VerifyHVP(self, hvp_data):
"""Verifies locally the syntax of the hypervisor parameters.
self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
def ExpandNames(self):
- # Information can be safely retrieved as the BGL is acquired in exclusive
- # mode
- assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
+ self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
+ self.share_locks = _ShareAll()
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ """
+ # Retrieve all information
self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
self.all_node_info = self.cfg.GetAllNodesInfo()
self.all_inst_info = self.cfg.GetAllInstancesInfo()
- self.needed_locks = {}
def Exec(self, feedback_fn):
"""Verify integrity of cluster, performing various test on nodes.
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
# Get instances in node group; this is unsafe and needs verification later
- inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
+ inst_names = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
self.needed_locks = {
locking.LEVEL_INSTANCE: inst_names,
self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
group_nodes = set(self.group_info.members)
- group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
+ group_instances = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
unlocked_nodes = \
group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
if unlocked_nodes:
raise errors.OpPrereqError("Missing lock for nodes: %s" %
- utils.CommaJoin(unlocked_nodes))
+ utils.CommaJoin(unlocked_nodes),
+ errors.ECODE_STATE)
if unlocked_instances:
raise errors.OpPrereqError("Missing lock for instances: %s" %
- utils.CommaJoin(unlocked_instances))
+ utils.CommaJoin(unlocked_instances),
+ errors.ECODE_STATE)
self.all_node_info = self.cfg.GetAllNodesInfo()
self.all_inst_info = self.cfg.GetAllInstancesInfo()
for inst in self.my_inst_info.values():
if inst.disk_template in constants.DTS_INT_MIRROR:
- group = self.my_node_info[inst.primary_node].group
- for nname in inst.secondary_nodes:
- if self.all_node_info[nname].group != group:
+ for nname in inst.all_nodes:
+ if self.all_node_info[nname].group != self.group_uuid:
extra_lv_nodes.add(nname)
unlocked_lv_nodes = \
extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
if unlocked_lv_nodes:
- raise errors.OpPrereqError("these nodes could be locked: %s" %
- utils.CommaJoin(unlocked_lv_nodes))
+ raise errors.OpPrereqError("Missing node locks for LV check: %s" %
+ utils.CommaJoin(unlocked_lv_nodes),
+ errors.ECODE_STATE)
self.extra_lv_nodes = list(extra_lv_nodes)
def _VerifyNode(self, ninfo, nresult):
ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
- _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, err)
+ _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err))
for node in node_vol_should:
n_img = node_image[node]
"""
for node, n_img in node_image.items():
- if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
+ if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
+ self.all_node_info[node].group != self.group_uuid):
# skip non-healthy nodes
continue
for volume in n_img.volumes:
# WARNING: we currently take into account down instances as well
# as up ones, considering that even if they're down someone
# might want to start them even in the event of a node failure.
- if n_img.offline:
- # we're skipping offline nodes from the N+1 warning, since
- # most likely we don't have good memory infromation from them;
- # we already list instances living on such nodes, and that's
- # enough warning
+ if n_img.offline or self.all_node_info[node].group != self.group_uuid:
+ # we're skipping nodes marked offline and nodes in other groups from
+ # the N+1 warning, since most likely we don't have good memory
+ # information from them; we already list instances living on such
+ # nodes, and that's enough warning
continue
- #TODO(dynmem): use MINMEM for checking
#TODO(dynmem): also consider ballooning out other instances
for prinode, instances in n_img.sbp.items():
needed_mem = 0
for instance in instances:
bep = cluster_info.FillBE(instance_cfg[instance])
if bep[constants.BE_AUTO_BALANCE]:
- needed_mem += bep[constants.BE_MAXMEM]
+ needed_mem += bep[constants.BE_MINMEM]
test = n_img.mfree < needed_mem
self._ErrorIf(test, constants.CV_ENODEN1, node,
"not enough memory to accomodate instance failovers"
node_disks[nname] = disks
- # Creating copies as SetDiskID below will modify the objects and that can
- # lead to incorrect data returned from nodes
- devonly = [dev.Copy() for (_, dev) in disks]
-
- for dev in devonly:
- self.cfg.SetDiskID(dev, nname)
+ # _AnnotateDiskParams makes already copies of the disks
+ devonly = []
+ for (inst, dev) in disks:
+ (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
+ self.cfg.SetDiskID(anno_disk, nname)
+ devonly.append(anno_disk)
node_disks_devonly[nname] = devonly
for instance in self.my_inst_names:
inst_config = self.my_inst_info[instance]
+ if inst_config.admin_state == constants.ADMINST_OFFLINE:
+ i_offline += 1
for nname in inst_config.all_nodes:
if nname not in node_image:
if master_node not in self.my_node_info:
additional_nodes.append(master_node)
vf_node_info.append(self.all_node_info[master_node])
- # Add the first vm_capable node we find which is not included
+ # Add the first vm_capable node we find which is not included,
+ # excluding the master node (which we already have)
for node in absent_nodes:
nodeinfo = self.all_node_info[node]
- if nodeinfo.vm_capable and not nodeinfo.offline:
+ if (nodeinfo.vm_capable and not nodeinfo.offline and
+ node != master_node):
additional_nodes.append(node)
vf_node_info.append(self.all_node_info[node])
break
non_primary_inst = set(nimg.instances).difference(nimg.pinst)
for inst in non_primary_inst:
- # FIXME: investigate best way to handle offline insts
- if inst.admin_state == constants.ADMINST_OFFLINE:
- if verbose:
- feedback_fn("* Skipping offline instance %s" % inst.name)
- i_offline += 1
- continue
test = inst in self.all_inst_info
_ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
"instance should not run on node %s", node_i.name)
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
# Check if node groups for locked instances are still correct
- for (instance_name, inst) in self.instances.items():
- assert owned_nodes.issuperset(inst.all_nodes), \
- "Instance %s's nodes changed while we kept the lock" % instance_name
-
- inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
- owned_groups)
-
- assert self.group_uuid in inst_groups, \
- "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
+ _CheckInstancesNodeGroups(self.cfg, self.instances,
+ owned_groups, owned_nodes, self.group_uuid)
def Exec(self, feedback_fn):
"""Verify integrity of cluster disks.
if self.op.diskparams:
for dt_params in self.op.diskparams.values():
utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+ try:
+ utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
+ except errors.OpPrereqError, err:
+ raise errors.OpPrereqError("While verify diskparams options: %s" % err,
+ errors.ECODE_INVAL)
def ExpandNames(self):
# FIXME: in the future maybe other cluster params won't require checking on
if violations:
self.LogWarning("After the ipolicy change the following instances"
" violate them: %s",
- utils.CommaJoin(violations))
+ utils.CommaJoin(utils.NiceSort(violations)))
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
if cluster.modify_etc_hosts:
files_all.add(constants.ETC_HOSTS)
+ if cluster.use_external_mip_script:
+ files_all.add(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
+
# Files which are optional, these must:
# - be present in one other category as well
# - either exist or not exist on all nodes of that category (mc, vm all)
if not redist:
files_mc.add(constants.CLUSTER_CONF_FILE)
- # FIXME: this should also be replicated but Ganeti doesn't support files_mc
- # replication
- files_mc.add(constants.DEFAULT_MASTER_SETUP_SCRIPT)
-
# Files which should only be on VM-capable nodes
files_vm = set(filename
for hv_name in cluster.enabled_hypervisors
master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
online_nodes = lu.cfg.GetOnlineNodeList()
- vm_nodes = lu.cfg.GetVmCapableNodeList()
+ online_set = frozenset(online_nodes)
+ vm_nodes = list(online_set.intersection(lu.cfg.GetVmCapableNodeList()))
if additional_nodes is not None:
online_nodes.extend(additional_nodes)
max_time = 0
done = True
cumul_degraded = False
- rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
+ rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
msg = rstats.fail_msg
if msg:
lu.LogWarning("Can't get any data from node %s: %s", node, msg)
return not cumul_degraded
-def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
+def _BlockdevFind(lu, node, dev, instance):
+ """Wrapper around call_blockdev_find to annotate diskparams.
+
+ @param lu: A reference to the lu object
+ @param node: The node to call out to
+ @param dev: The device to find
+ @param instance: The instance object the device belongs to
+ @return: The result of the rpc call
+
+ """
+ (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
+ return lu.rpc.call_blockdev_find(node, disk)
+
+
+def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
+ """Wrapper around L{_CheckDiskConsistencyInner}.
+
+ """
+ (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
+ return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
+ ldisk=ldisk)
+
+
+def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
+ ldisk=False):
"""Check that mirrors are not degraded.
+ @attention: The device has to be annotated already.
+
The ldisk parameter, if True, will change the test from the
is_degraded attribute (which represents overall non-ok status for
the device(s)) to the ldisk (representing the local storage status).
if dev.children:
for child in dev.children:
- result = result and _CheckDiskConsistency(lu, child, node, on_primary)
+ result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
+ on_primary)
return result
"""Logical unit for OOB handling.
"""
- REG_BGL = False
+ REQ_BGL = False
_SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
def ExpandNames(self):
def BuildHooksEnv(self):
"""Build hooks env.
- This doesn't run on the target node in the pre phase as a failed
- node would then be impossible to remove.
-
"""
return {
"OP_TARGET": self.op.node_name,
def BuildHooksNodes(self):
"""Build hooks nodes.
+ This doesn't run on the target node in the pre phase as a failed
+ node would then be impossible to remove.
+
"""
all_nodes = self.cfg.GetNodeList()
try:
all_nodes.remove(self.op.node_name)
except ValueError:
- logging.warning("Node '%s', which is about to be removed, was not found"
- " in the list of all nodes", self.op.node_name)
+ pass
return (all_nodes, all_nodes)
def CheckPrereq(self):
if self.op.disk_state:
self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+ # TODO: If we need to have multiple DnsOnlyRunner instances, we should
+ # probably make this a property on the base class.
+ result = rpc.DnsOnlyRunner().call_version([node])[node]
+ result.Raise("Can't get version information from node %s" % node)
+ if constants.PROTOCOL_VERSION == result.payload:
+ logging.info("Communication to node %s fine, sw version %s match",
+ node, result.payload)
+ else:
+ raise errors.OpPrereqError("Version mismatch master version %s,"
+ " node version %s" %
+ (constants.PROTOCOL_VERSION, result.payload),
+ errors.ECODE_ENVIRON)
+
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
if self.op.disk_state:
new_node.disk_state_static = self.new_disk_state
- # check connectivity
- result = self.rpc.call_version([node])[node]
- result.Raise("Can't get version information from node %s" % node)
- if constants.PROTOCOL_VERSION == result.payload:
- logging.info("Communication to node %s fine, sw version %s match",
- node, result.payload)
- else:
- raise errors.OpExecError("Version mismatch master version %s,"
- " node version %s" %
- (constants.PROTOCOL_VERSION, result.payload))
-
# Add node to our /etc/hosts, and add key to known_hosts
if self.cfg.GetClusterInfo().modify_etc_hosts:
master_node = self.cfg.GetMasterNode()
if mc_remaining < mc_should:
raise errors.OpPrereqError("Not enough master candidates, please"
" pass auto promote option to allow"
- " promotion", errors.ECODE_STATE)
+ " promotion (--auto-promote or RAPI"
+ " auto_promote=True)", errors.ECODE_STATE)
self.old_flags = old_flags = (node.master_candidate,
node.drained, node.offline)
if old_role == self._ROLE_OFFLINE and new_role != old_role:
# Trying to transition out of offline status
- # TODO: Use standard RPC runner, but make sure it works when the node is
- # still marked offline
- result = rpc.BootstrapRunner().call_version([node.name])[node.name]
+ result = self.rpc.call_version([node.name])[node.name]
if result.fail_msg:
raise errors.OpPrereqError("Node %s is being de-offlined but fails"
" to report its version: %s" %
"config_version": constants.CONFIG_VERSION,
"os_api_version": max(constants.OS_API_VERSIONS),
"export_version": constants.EXPORT_VERSION,
- "architecture": (platform.architecture()[0], platform.machine()),
+ "architecture": runtime.GetArchInfo(),
"name": cluster.cluster_name,
"master": cluster.master_node,
"default_hypervisor": cluster.primary_hypervisor,
"ipolicy": cluster.ipolicy,
"nicparams": cluster.nicparams,
"ndparams": cluster.ndparams,
+ "diskparams": cluster.diskparams,
"candidate_pool_size": cluster.candidate_pool_size,
"master_netdev": cluster.master_netdev,
"master_netmask": cluster.master_netmask,
"""
REQ_BGL = False
- _FIELDS_DYNAMIC = utils.FieldSet()
- _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
- "watcher_pause", "volume_group_name")
def CheckArguments(self):
- _CheckOutputFields(static=self._FIELDS_STATIC,
- dynamic=self._FIELDS_DYNAMIC,
- selected=self.op.output_fields)
+ self.cq = _ClusterQuery(None, self.op.output_fields, False)
def ExpandNames(self):
- self.needed_locks = {}
+ self.cq.ExpandNames(self)
+
+ def DeclareLocks(self, level):
+ self.cq.DeclareLocks(self, level)
def Exec(self, feedback_fn):
- """Dump a representation of the cluster config to the standard output.
-
- """
- values = []
- for field in self.op.output_fields:
- if field == "cluster_name":
- entry = self.cfg.GetClusterName()
- elif field == "master_node":
- entry = self.cfg.GetMasterNode()
- elif field == "drain_flag":
- entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
- elif field == "watcher_pause":
- entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
- elif field == "volume_group_name":
- entry = self.cfg.GetVGName()
- else:
- raise errors.ParameterError(field)
- values.append(entry)
- return values
+ result = self.cq.OldStyleQuery(self)
+
+ assert len(result) == 1
+
+ return result[0]
+
+
+class _ClusterQuery(_QueryBase):
+ FIELDS = query.CLUSTER_FIELDS
+
+ #: Do not sort (there is only one item)
+ SORT_FIELD = None
+
+ def ExpandNames(self, lu):
+ lu.needed_locks = {}
+
+ # The following variables interact with _QueryBase._GetNames
+ self.wanted = locking.ALL_SET
+ self.do_locking = self.use_locking
+
+ if self.do_locking:
+ raise errors.OpPrereqError("Can not use locking for cluster queries",
+ errors.ECODE_INVAL)
+
+ def DeclareLocks(self, lu, level):
+ pass
+
+ def _GetQueryData(self, lu):
+ """Computes the list of nodes and their attributes.
+
+ """
+ # Locking is not used
+ assert not (compat.any(lu.glm.is_owned(level)
+ for level in locking.LEVELS
+ if level != locking.LEVEL_CLUSTER) or
+ self.do_locking or self.use_locking)
+
+ if query.CQ_CONFIG in self.requested_data:
+ cluster = lu.cfg.GetClusterInfo()
+ else:
+ cluster = NotImplemented
+
+ if query.CQ_QUEUE_DRAINED in self.requested_data:
+ drain_flag = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
+ else:
+ drain_flag = NotImplemented
+
+ if query.CQ_WATCHER_PAUSE in self.requested_data:
+ watcher_pause = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
+ else:
+ watcher_pause = NotImplemented
+
+ return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
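# Illustrative sketch, not part of the original change (attribute names are
# assumed to mirror the query.ClusterQueryData constructor arguments): the
# NotImplemented sentinel lets a consumer distinguish "field not requested"
# from a legitimately falsy value such as an undrained queue:
#
#   data = cq._GetQueryData(lu)
#   if data.drain_flag is not NotImplemented:
#     feedback_fn("Queue drained: %s" % data.drain_flag)
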
class LUInstanceActivateDisks(NoHooksLU):
node_disk = node_disk.Copy()
node_disk.UnsetSize()
lu.cfg.SetDiskID(node_disk, node)
- result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
+ result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
+ False, idx)
msg = result.fail_msg
if msg:
+ is_offline_secondary = (node in instance.secondary_nodes and
+ result.offline)
lu.proc.LogWarning("Could not prepare block device %s on node %s"
" (is_primary=False, pass=1): %s",
inst_disk.iv_name, node, msg)
- if not ignore_secondaries:
+ if not (ignore_secondaries or is_offline_secondary):
disks_ok = False
# FIXME: race condition on drbd migration to primary
node_disk = node_disk.Copy()
node_disk.UnsetSize()
lu.cfg.SetDiskID(node_disk, node)
- result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
+ result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
+ True, idx)
msg = result.fail_msg
if msg:
lu.proc.LogWarning("Could not prepare block device %s on node %s"
for disk in disks:
for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
lu.cfg.SetDiskID(top_disk, node)
- result = lu.rpc.call_blockdev_shutdown(node, top_disk)
+ result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
msg = result.fail_msg
if msg:
lu.LogWarning("Could not shutdown block device %s on node %s: %s",
@param requested: the amount of memory in MiB to check for
@type hypervisor_name: C{str}
@param hypervisor_name: the hypervisor to ask for memory stats
+ @rtype: integer
+ @return: node current free memory
@raise errors.OpPrereqError: if the node doesn't have enough memory, or
we cannot check the node
" needed %s MiB, available %s MiB" %
(node, reason, requested, free_mem),
errors.ECODE_NORES)
+ return free_mem
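# Illustrative note, not part of the original change: returning the free
# memory lets callers remember how much room the target node actually has,
# e.g. for the runtime memory ballooning during migration (this mirrors
# TLMigrateInstance.CheckPrereq further below):
#
#   self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
#                                            "migrating instance %s" %
#                                            instance.name,
#                                            i_be[constants.BE_MINMEM],
#                                            instance.hypervisor)
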
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
"Cannot retrieve locked instance %s" % self.op.instance_name
_CheckNodeOnline(self, instance.primary_node, "Instance primary node"
" offline, cannot reinstall")
- for node in instance.secondary_nodes:
- _CheckNodeOnline(self, node, "Instance secondary node offline,"
- " cannot reinstall")
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- self.needed_locks[locking.LEVEL_NODE][:]
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- self.needed_locks[locking.LEVEL_NODE][:]
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
"""
logging.info("Removing block devices for instance %s", instance.name)
- if not _RemoveDisks(lu, instance):
+ if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
if not ignore_failures:
raise errors.OpExecError("Can't remove instance's disks")
feedback_fn("Warning: can't remove instance's disks")
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
+ self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
ignore_consistency = self.op.ignore_consistency
shutdown_timeout = self.op.shutdown_timeout
self._migrater = TLMigrateInstance(self, self.op.instance_name,
del self.recalculate_locks[locking.LEVEL_NODE]
else:
self._LockInstancesNodes()
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
- self._migrater = TLMigrateInstance(self, self.op.instance_name,
- cleanup=self.op.cleanup,
- failover=False,
- fallback=self.op.allow_failover,
- ignore_ipolicy=self.op.ignore_ipolicy)
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
+ self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+
+ self._migrater = \
+ TLMigrateInstance(self, self.op.instance_name,
+ cleanup=self.op.cleanup,
+ failover=False,
+ fallback=self.op.allow_failover,
+ allow_runtime_changes=self.op.allow_runtime_changes,
+ ignore_ipolicy=self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
del self.recalculate_locks[locking.LEVEL_NODE]
else:
self._LockInstancesNodes()
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
"MIGRATE_CLEANUP": self.op.cleanup,
"OLD_PRIMARY": source_node,
"NEW_PRIMARY": target_node,
+ "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
})
if instance.disk_template in constants.DTS_INT_MIRROR:
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- self.needed_locks[locking.LEVEL_NODE][:]
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
# activate, get path, copy the data over
for idx, disk in enumerate(instance.disks):
self.LogInfo("Copying data for disk %d", idx)
- result = self.rpc.call_blockdev_assemble(target_node, disk,
+ result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
instance.name, True, idx)
if result.fail_msg:
self.LogWarning("Can't assemble newly created disk %d: %s",
errs.append(result.fail_msg)
break
dev_path = result.payload
- result = self.rpc.call_blockdev_export(source_node, disk,
+ result = self.rpc.call_blockdev_export(source_node, (disk, instance),
target_node, dev_path,
cluster_name)
if result.fail_msg:
"""
return {
"NODE_NAME": self.op.node_name,
+ "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
}
def BuildHooksNodes(self):
def Exec(self, feedback_fn):
# Prepare jobs for migration instances
+ allow_runtime_changes = self.op.allow_runtime_changes
jobs = [
[opcodes.OpInstanceMigrate(instance_name=inst.name,
mode=self.op.mode,
live=self.op.live,
iallocator=self.op.iallocator,
target_node=self.op.target_node,
+ allow_runtime_changes=allow_runtime_changes,
ignore_ipolicy=self.op.ignore_ipolicy)]
for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
]
def __init__(self, lu, instance_name, cleanup=False,
failover=False, fallback=False,
ignore_consistency=False,
+ allow_runtime_changes=True,
shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
ignore_ipolicy=False):
"""Initializes this class.
self.ignore_consistency = ignore_consistency
self.shutdown_timeout = shutdown_timeout
self.ignore_ipolicy = ignore_ipolicy
+ self.allow_runtime_changes = allow_runtime_changes
def CheckPrereq(self):
"""Check prerequisites.
# check memory requirements on the secondary node
if (not self.cleanup and
(not self.failover or instance.admin_state == constants.ADMINST_UP)):
- _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
- instance.name, i_be[constants.BE_MAXMEM],
- instance.hypervisor)
+ self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
+ "migrating instance %s" %
+ instance.name,
+ i_be[constants.BE_MINMEM],
+ instance.hypervisor)
else:
self.lu.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
# check if failover must be forced instead of migration
if (not self.cleanup and not self.failover and
i_be[constants.BE_ALWAYS_FAILOVER]):
- if self.fallback:
- self.lu.LogInfo("Instance configured to always failover; fallback"
- " to failover")
- self.failover = True
- else:
- raise errors.OpPrereqError("This instance has been configured to"
- " always failover, please allow failover",
- errors.ECODE_STATE)
+ self.lu.LogInfo("Instance configured to always failover; fallback"
+ " to failover")
+ self.failover = True
# check bridge existance
_CheckInstanceBridgesExist(self.lu, instance, node=target_node)
# Failover is never live
self.live = False
+ if not (self.failover or self.cleanup):
+ remote_info = self.rpc.call_instance_info(instance.primary_node,
+ instance.name,
+ instance.hypervisor)
+ remote_info.Raise("Error checking instance on node %s" %
+ instance.primary_node)
+ instance_running = bool(remote_info.payload)
+ if instance_running:
+ self.current_mem = int(remote_info.payload["memory"])
+
def _RunAllocator(self):
"""Run the allocator based on input opcode.
ial = IAllocator(self.cfg, self.rpc,
mode=constants.IALLOCATOR_MODE_RELOC,
name=self.instance_name,
- # TODO See why hail breaks with a single node below
- relocate_from=[self.instance.primary_node,
- self.instance.primary_node],
+ relocate_from=[self.instance.primary_node],
)
ial.Run(self.lu.op.iallocator)
all_done = True
result = self.rpc.call_drbd_wait_sync(self.all_nodes,
self.nodes_ip,
- self.instance.disks)
+ (self.instance.disks,
+ self.instance))
min_percent = 100
for node, nres in result.items():
nres.Raise("Cannot resync disks on node %s" % node)
msg = "single-master"
self.feedback_fn("* changing disks into %s mode" % msg)
result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
- self.instance.disks,
+ (self.instance.disks, self.instance),
self.instance.name, multimaster)
for node, nres in result.items():
nres.Raise("Cannot change disks config on node %s" % node)
(src_version, dst_version))
self.feedback_fn("* checking disk consistency between source and target")
- for dev in instance.disks:
- if not _CheckDiskConsistency(self.lu, dev, target_node, False):
+ for (idx, dev) in enumerate(instance.disks):
+ if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
raise errors.OpExecError("Disk %s is degraded or not fully"
" synchronized on target node,"
- " aborting migration" % dev.iv_name)
+ " aborting migration" % idx)
+
+ if self.current_mem > self.tgt_free_mem:
+ if not self.allow_runtime_changes:
+ raise errors.OpExecError("Memory ballooning not allowed and not enough"
+ " free memory to fit instance %s on target"
+ " node %s (have %dMB, need %dMB)" %
+ (instance.name, target_node,
+ self.tgt_free_mem, self.current_mem))
+ self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
+ rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
+ instance,
+ self.tgt_free_mem)
+ rpcres.Raise("Cannot modify instance runtime memory")
# First get the migration information from the remote node
result = self.rpc.call_migration_info(source_node, instance)
disks = _ExpandCheckDisks(instance, instance.disks)
self.feedback_fn("* unmapping instance's disks from %s" % source_node)
for disk in disks:
- result = self.rpc.call_blockdev_shutdown(source_node, disk)
+ result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
msg = result.fail_msg
if msg:
logging.error("Migration was successful, but couldn't unmap the"
if instance.admin_state == constants.ADMINST_UP:
self.feedback_fn("* checking disk consistency between source and target")
- for dev in instance.disks:
+ for (idx, dev) in enumerate(instance.disks):
# for drbd, these are drbd over lvm
- if not _CheckDiskConsistency(self.lu, dev, target_node, False):
+ if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
+ False):
if primary_node.offline:
self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
" target node %s" %
- (primary_node.name, dev.iv_name, target_node))
+ (primary_node.name, idx, target_node))
elif not self.ignore_consistency:
raise errors.OpExecError("Disk %s is degraded on target node,"
- " aborting failover" % dev.iv_name)
+ " aborting failover" % idx)
else:
self.feedback_fn("* not checking disk consistency as instance is not"
" running")
return self._ExecMigration()
-def _CreateBlockDev(lu, node, instance, device, force_create,
- info, force_open):
+def _CreateBlockDev(lu, node, instance, device, force_create, info,
+ force_open):
+ """Wrapper around L{_CreateBlockDevInner}.
+
+ This method annotates the root device first.
+
+ """
+ (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
+ return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
+ force_open)
+
+
+def _CreateBlockDevInner(lu, node, instance, device, force_create,
+ info, force_open):
"""Create a tree of block devices on a given node.
If this device type has to be created on secondaries, create it and
If not, just recurse to children keeping the same 'force' value.
+ @attention: The device has to be annotated already.
+
@param lu: the lu on whose behalf we execute
@param node: the node on which to create the device
@type instance: L{objects.Instance}
if device.children:
for child in device.children:
- _CreateBlockDev(lu, node, instance, child, force_create,
- info, force_open)
+ _CreateBlockDevInner(lu, node, instance, child, force_create,
+ info, force_open)
if not force_create:
return
return results
-def _ComputeLDParams(disk_template, disk_params):
- """Computes Logical Disk parameters from Disk Template parameters.
-
- @type disk_template: string
- @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
- @type disk_params: dict
- @param disk_params: disk template parameters; dict(template_name -> parameters
- @rtype: list(dict)
- @return: a list of dicts, one for each node of the disk hierarchy. Each dict
- contains the LD parameters of the node. The tree is flattened in-order.
-
- """
- if disk_template not in constants.DISK_TEMPLATES:
- raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
-
- result = list()
- dt_params = disk_params[disk_template]
- if disk_template == constants.DT_DRBD8:
- drbd_params = {
- constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
- constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
- constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
- constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
- constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
- constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
- constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
- constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
- constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
- constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
- constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
- constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
- }
-
- drbd_params = \
- objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8],
- drbd_params)
-
- result.append(drbd_params)
-
- # data LV
- data_params = {
- constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
- }
- data_params = \
- objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
- data_params)
- result.append(data_params)
-
- # metadata LV
- meta_params = {
- constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
- }
- meta_params = \
- objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
- meta_params)
- result.append(meta_params)
-
- elif (disk_template == constants.DT_FILE or
- disk_template == constants.DT_SHARED_FILE):
- result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
-
- elif disk_template == constants.DT_PLAIN:
- params = {
- constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
- }
- params = \
- objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
- params)
- result.append(params)
-
- elif disk_template == constants.DT_BLOCK:
- result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
-
- elif disk_template == constants.DT_RBD:
- params = {
- constants.LDP_POOL: dt_params[constants.RBD_POOL]
- }
- params = \
- objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD],
- params)
- result.append(params)
-
- return result
-
-
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
- iv_name, p_minor, s_minor, drbd_params, data_params,
- meta_params):
+ iv_name, p_minor, s_minor):
"""Generate a drbd8 device complete with its children.
"""
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
logical_id=(vgnames[0], names[0]),
- params=data_params)
+ params={})
dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
logical_id=(vgnames[1], names[1]),
- params=meta_params)
+ params={})
drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
logical_id=(primary, secondary, port,
p_minor, s_minor,
shared_secret),
children=[dev_data, dev_meta],
- iv_name=iv_name, params=drbd_params)
+ iv_name=iv_name, params={})
return drbd_dev
-def _GenerateDiskTemplate(lu, template_name,
- instance_name, primary_node,
- secondary_nodes, disk_info,
- file_storage_dir, file_driver,
- base_index, feedback_fn, disk_params):
+_DISK_TEMPLATE_NAME_PREFIX = {
+ constants.DT_PLAIN: "",
+ constants.DT_RBD: ".rbd",
+ }
+
+
+_DISK_TEMPLATE_DEVICE_TYPE = {
+ constants.DT_PLAIN: constants.LD_LV,
+ constants.DT_FILE: constants.LD_FILE,
+ constants.DT_SHARED_FILE: constants.LD_FILE,
+ constants.DT_BLOCK: constants.LD_BLOCKDEV,
+ constants.DT_RBD: constants.LD_RBD,
+ }
+
+
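# Illustrative note, not part of the original change: the two maps above
# drive the generic branch of _GenerateDiskTemplate below. For DT_PLAIN the
# name prefix is empty, so the generated names keep the historical
# ".disk<N>" form, while DT_RBD yields ".rbd.disk<N>"; similarly the device
# type is looked up instead of being hard-coded in each branch:
#
#   _DISK_TEMPLATE_NAME_PREFIX[constants.DT_RBD]     # ".rbd"
#   _DISK_TEMPLATE_DEVICE_TYPE[constants.DT_PLAIN]   # constants.LD_LV
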
+def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
+ secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
+ feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
+ _req_shr_file_storage=opcodes.RequireSharedFileStorage):
"""Generate the entire disk layout for a given template type.
"""
vgname = lu.cfg.GetVGName()
disk_count = len(disk_info)
disks = []
- ld_params = _ComputeLDParams(template_name, disk_params)
+
if template_name == constants.DT_DISKLESS:
pass
- elif template_name == constants.DT_PLAIN:
- if secondary_nodes:
- raise errors.ProgrammerError("Wrong template configuration")
-
- names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
- for i in range(disk_count)])
- for idx, disk in enumerate(disk_info):
- disk_index = idx + base_index
- vg = disk.get(constants.IDISK_VG, vgname)
- feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
- disk_dev = objects.Disk(dev_type=constants.LD_LV,
- size=disk[constants.IDISK_SIZE],
- logical_id=(vg, names[idx]),
- iv_name="disk/%d" % disk_index,
- mode=disk[constants.IDISK_MODE],
- params=ld_params[0])
- disks.append(disk_dev)
elif template_name == constants.DT_DRBD8:
- drbd_params, data_params, meta_params = ld_params
if len(secondary_nodes) != 1:
raise errors.ProgrammerError("Wrong template configuration")
remote_node = secondary_nodes[0]
minors = lu.cfg.AllocateDRBDMinor(
[primary_node, remote_node] * len(disk_info), instance_name)
+ (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
+ full_disk_params)
+ drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
+
names = []
for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
for i in range(disk_count)]):
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
data_vg = disk.get(constants.IDISK_VG, vgname)
meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
[data_vg, meta_vg],
names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
- minors[idx * 2], minors[idx * 2 + 1],
- drbd_params, data_params, meta_params)
+ minors[idx * 2], minors[idx * 2 + 1])
disk_dev.mode = disk[constants.IDISK_MODE]
disks.append(disk_dev)
- elif template_name == constants.DT_FILE:
- if secondary_nodes:
- raise errors.ProgrammerError("Wrong template configuration")
-
- opcodes.RequireFileStorage()
-
- for idx, disk in enumerate(disk_info):
- disk_index = idx + base_index
- disk_dev = objects.Disk(dev_type=constants.LD_FILE,
- size=disk[constants.IDISK_SIZE],
- iv_name="disk/%d" % disk_index,
- logical_id=(file_driver,
- "%s/disk%d" % (file_storage_dir,
- disk_index)),
- mode=disk[constants.IDISK_MODE],
- params=ld_params[0])
- disks.append(disk_dev)
- elif template_name == constants.DT_SHARED_FILE:
+ else:
if secondary_nodes:
raise errors.ProgrammerError("Wrong template configuration")
- opcodes.RequireSharedFileStorage()
-
- for idx, disk in enumerate(disk_info):
- disk_index = idx + base_index
- disk_dev = objects.Disk(dev_type=constants.LD_FILE,
- size=disk[constants.IDISK_SIZE],
- iv_name="disk/%d" % disk_index,
- logical_id=(file_driver,
- "%s/disk%d" % (file_storage_dir,
- disk_index)),
- mode=disk[constants.IDISK_MODE],
- params=ld_params[0])
- disks.append(disk_dev)
- elif template_name == constants.DT_BLOCK:
- if secondary_nodes:
- raise errors.ProgrammerError("Wrong template configuration")
+ if template_name == constants.DT_FILE:
+ _req_file_storage()
+ elif template_name == constants.DT_SHARED_FILE:
+ _req_shr_file_storage()
- for idx, disk in enumerate(disk_info):
- disk_index = idx + base_index
- disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
- size=disk[constants.IDISK_SIZE],
- logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
- disk[constants.IDISK_ADOPT]),
- iv_name="disk/%d" % disk_index,
- mode=disk[constants.IDISK_MODE],
- params=ld_params[0])
- disks.append(disk_dev)
- elif template_name == constants.DT_RBD:
- if secondary_nodes:
- raise errors.ProgrammerError("Wrong template configuration")
+ name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
+ if name_prefix is None:
+ names = None
+ else:
+ names = _GenerateUniqueNames(lu, ["%s.disk%s" %
+ (name_prefix, base_index + i)
+ for i in range(disk_count)])
+
+ if template_name == constants.DT_PLAIN:
+ def logical_id_fn(idx, _, disk):
+ vg = disk.get(constants.IDISK_VG, vgname)
+ return (vg, names[idx])
+ elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
+ logical_id_fn = \
+ lambda _, disk_index, disk: (file_driver,
+ "%s/disk%d" % (file_storage_dir,
+ disk_index))
+ elif template_name == constants.DT_BLOCK:
+ logical_id_fn = \
+ lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
+ disk[constants.IDISK_ADOPT])
+ elif template_name == constants.DT_RBD:
+ logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+ else:
+ raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
- names = _GenerateUniqueNames(lu, [".rbd.disk%d" % (base_index + i)
- for i in range(disk_count)])
+ dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- disk_dev = objects.Disk(dev_type=constants.LD_RBD,
- size=disk[constants.IDISK_SIZE],
- logical_id=("rbd", names[idx]),
- iv_name="disk/%d" % disk_index,
- mode=disk[constants.IDISK_MODE],
- params=ld_params[0])
- disks.append(disk_dev)
+ size = disk[constants.IDISK_SIZE]
+ feedback_fn("* disk %s, size %s" %
+ (disk_index, utils.FormatUnit(size, "h")))
+ disks.append(objects.Disk(dev_type=dev_type, size=size,
+ logical_id=logical_id_fn(idx, disk_index, disk),
+ iv_name="disk/%d" % disk_index,
+ mode=disk[constants.IDISK_MODE],
+ params={}))
- else:
- raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
return disks
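# Illustrative worked example, not part of the original change (values are
# made up): logical_id_fn is a per-template closure, so the shared loop
# above builds the right logical_id without per-template branches, e.g. for
# DT_PLAIN:
#
#   logical_id_fn(0, 0, {constants.IDISK_VG: "xenvg",
#                        constants.IDISK_SIZE: 1024})
#   # -> ("xenvg", names[0])
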
lu.cfg.SetDiskID(device, node)
logging.info("Pause sync of instance %s disks", instance.name)
- result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
+ result = lu.rpc.call_blockdev_pause_resume_sync(node,
+ (instance.disks, instance),
+ True)
+ result.Raise("Failed RPC to node %s for pausing the disk syncing" % node)
for idx, success in enumerate(result.payload):
if not success:
wipe_size = min(wipe_chunk_size, size - offset)
logging.debug("Wiping disk %d, offset %s, chunk %s",
idx, offset, wipe_size)
- result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
+ result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
+ wipe_size)
result.Raise("Could not wipe disk %d at offset %d for size %d" %
(idx, offset, wipe_size))
now = time.time()
finally:
logging.info("Resume sync of instance %s disks", instance.name)
- result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
+ result = lu.rpc.call_blockdev_pause_resume_sync(node,
+ (instance.disks, instance),
+ False)
- for idx, success in enumerate(result.payload):
- if not success:
- lu.LogWarning("Resume sync of disk %d failed, please have a"
- " look at the status and troubleshoot the issue", idx)
- logging.warn("resume-sync of instance %s for disks %d failed",
- instance.name, idx)
+ if result.fail_msg:
+ lu.LogWarning("RPC call to %s for resuming disk syncing failed,"
+ " please have a look at the status and troubleshoot"
+ " the issue: %s", node, result.fail_msg)
+ else:
+ for idx, success in enumerate(result.payload):
+ if not success:
+ lu.LogWarning("Resume sync of disk %d failed, please have a"
+ " look at the status and troubleshoot the issue", idx)
+ logging.warn("resume-sync of instance %s for disks %d failed",
+ instance.name, idx)
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
for idx, device in enumerate(instance.disks):
if to_skip and idx in to_skip:
continue
- logging.info("Creating volume %s for instance %s",
- device.iv_name, instance.name)
+ logging.info("Creating disk %s for instance '%s'", idx, instance.name)
#HARDCODE
for node in all_nodes:
f_create = node == pnode
_CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
-def _RemoveDisks(lu, instance, target_node=None):
+def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
"""Remove all disks for an instance.
This abstracts away some work from `AddInstance()` and
logging.info("Removing block devices for instance %s", instance.name)
all_result = True
- for device in instance.disks:
+ ports_to_release = set()
+ anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
+ for (idx, device) in enumerate(anno_disks):
if target_node:
edata = [(target_node, device)]
else:
edata = device.ComputeNodeTree(instance.primary_node)
for node, disk in edata:
lu.cfg.SetDiskID(disk, node)
- msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
- if msg:
- lu.LogWarning("Could not remove block device %s on node %s,"
- " continuing anyway: %s", device.iv_name, node, msg)
- all_result = False
+ result = lu.rpc.call_blockdev_remove(node, disk)
+ if result.fail_msg:
+ lu.LogWarning("Could not remove disk %s on node %s,"
+ " continuing anyway: %s", idx, node, result.fail_msg)
+ if not (result.offline and node != instance.primary_node):
+ all_result = False
# if this is a DRBD disk, return its port to the pool
if device.dev_type in constants.LDS_DRBD:
- tcp_port = device.logical_id[2]
- lu.cfg.AddTcpUdpPort(tcp_port)
+ ports_to_release.add(device.logical_id[2])
+
+ if all_result or ignore_failures:
+ for port in ports_to_release:
+ lu.cfg.AddTcpUdpPort(port)
if instance.disk_template == constants.DT_FILE:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
def _ComputeDiskSize(disk_template, disks):
- """Compute disk size requirements in the volume group
+ """Compute disk size requirements according to disk template
"""
# Required free disk space as a function of disk and swap space
# 128 MB are added for drbd metadata for each disk
constants.DT_DRBD8:
sum(d[constants.IDISK_SIZE] + DRBD_META_SIZE for d in disks),
- constants.DT_FILE: None,
- constants.DT_SHARED_FILE: 0,
+ constants.DT_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
+ constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
constants.DT_BLOCK: 0,
- constants.DT_RBD: 0,
+ constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
}
if disk_template not in req_size_dict:
os=self.op.os_type,
vcpus=self.be_full[constants.BE_VCPUS],
memory=self.be_full[constants.BE_MAXMEM],
+ spindle_use=self.be_full[constants.BE_SPINDLE_USE],
disks=self.disks,
nics=nics,
hypervisor=self.op.hypervisor,
if self.op.mode == constants.INSTANCE_IMPORT:
export_info = self._ReadExportInfo()
self._ReadExportParams(export_info)
+ self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
+ else:
+ self._old_instance_name = None
if (not self.cfg.GetVGName() and
self.op.disk_template not in constants.DTS_NOT_LVM):
self.src_images = disk_images
- old_name = export_info.get(constants.INISECT_INS, "name")
- if self.op.instance_name == old_name:
+ if self.op.instance_name == self._old_instance_name:
for idx, nic in enumerate(self.nics):
if nic.mac == constants.VALUE_AUTO:
nic_mac_ini = "nic%d_mac" % idx
nodenames = [pnode.name] + self.secondaries
# Verify instance specs
+ spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
ispec = {
constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
constants.ISPEC_DISK_COUNT: len(self.disks),
constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
constants.ISPEC_NIC_COUNT: len(self.nics),
+ constants.ISPEC_SPINDLE_USE: spindle_use,
}
group_info = self.cfg.GetNodeGroup(pnode.group)
utils.CommaJoin(res)),
errors.ECODE_INVAL)
- # disk parameters (not customizable at instance or node level)
- # just use the primary node parameters, ignoring the secondary.
- self.diskparams = group_info.diskparams
-
if not self.adopt_disks:
if self.op.disk_template == constants.DT_RBD:
# _CheckRADOSFreeSpace() is just a placeholder.
else:
network_port = None
+ # This is ugly, but we have a chicken-and-egg problem here
+ # We can only take the group disk parameters, as the instance
+ # has no disks yet (we are generating them right here).
+ node = self.cfg.GetNodeInfo(pnode_name)
+ nodegroup = self.cfg.GetNodeGroup(node.group)
disks = _GenerateDiskTemplate(self,
self.op.disk_template,
instance, pnode_name,
self.op.file_driver,
0,
feedback_fn,
- self.diskparams)
+ self.cfg.GetGroupDiskParams(nodegroup))
iobj = objects.Instance(name=instance, os=self.op.os_type,
primary_node=pnode_name,
_ReleaseLocks(self, locking.LEVEL_NODE_RES)
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
+ # we need to set the disk IDs to the primary node, since the
+ # preceding code might or might not have done it, depending on
+ # disk template and other options
+ for disk in iobj.disks:
+ self.cfg.SetDiskID(disk, pnode_name)
if self.op.mode == constants.INSTANCE_CREATE:
if not self.op.no_install:
pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
if pause_sync:
feedback_fn("* pausing disk sync to install instance OS")
result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
- iobj.disks, True)
+ (iobj.disks,
+ iobj), True)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("pause-sync of instance %s for disk %d failed",
if pause_sync:
feedback_fn("* resuming disk sync")
result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
- iobj.disks, False)
+ (iobj.disks,
+ iobj), False)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("resume-sync of instance %s for disk %d failed",
os_add_result.Raise("Could not add os for instance %s"
" on node %s" % (instance, pnode_name))
- elif self.op.mode == constants.INSTANCE_IMPORT:
- feedback_fn("* running the instance OS import scripts...")
+ else:
+ if self.op.mode == constants.INSTANCE_IMPORT:
+ feedback_fn("* running the instance OS import scripts...")
+
+ transfers = []
+
+ for idx, image in enumerate(self.src_images):
+ if not image:
+ continue
+
+ # FIXME: pass debug option from opcode to backend
+ dt = masterd.instance.DiskTransfer("disk/%s" % idx,
+ constants.IEIO_FILE, (image, ),
+ constants.IEIO_SCRIPT,
+ (iobj.disks[idx], idx),
+ None)
+ transfers.append(dt)
+
+ import_result = \
+ masterd.instance.TransferInstanceData(self, feedback_fn,
+ self.op.src_node, pnode_name,
+ self.pnode.secondary_ip,
+ iobj, transfers)
+ if not compat.all(import_result):
+ self.LogWarning("Some disks for instance %s on node %s were not"
+ " imported successfully" % (instance, pnode_name))
+
+ rename_from = self._old_instance_name
+
+ elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
+ feedback_fn("* preparing remote import...")
+ # The source cluster will stop the instance before attempting to make
+ # a connection. In some cases stopping an instance can take a long
+ # time, hence the shutdown timeout is added to the connection
+ # timeout.
+ connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
+ self.op.source_shutdown_timeout)
+ timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
- transfers = []
+ assert iobj.primary_node == self.pnode.name
+ disk_results = \
+ masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
+ self.source_x509_ca,
+ self._cds, timeouts)
+ if not compat.all(disk_results):
+ # TODO: Should the instance still be started, even if some disks
+ # failed to import (valid for local imports, too)?
+ self.LogWarning("Some disks for instance %s on node %s were not"
+ " imported successfully" % (instance, pnode_name))
- for idx, image in enumerate(self.src_images):
- if not image:
- continue
+ rename_from = self.source_instance_name
- # FIXME: pass debug option from opcode to backend
- dt = masterd.instance.DiskTransfer("disk/%s" % idx,
- constants.IEIO_FILE, (image, ),
- constants.IEIO_SCRIPT,
- (iobj.disks[idx], idx),
- None)
- transfers.append(dt)
-
- import_result = \
- masterd.instance.TransferInstanceData(self, feedback_fn,
- self.op.src_node, pnode_name,
- self.pnode.secondary_ip,
- iobj, transfers)
- if not compat.all(import_result):
- self.LogWarning("Some disks for instance %s on node %s were not"
- " imported successfully" % (instance, pnode_name))
-
- elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
- feedback_fn("* preparing remote import...")
- # The source cluster will stop the instance before attempting to make a
- # connection. In some cases stopping an instance can take a long time,
- # hence the shutdown timeout is added to the connection timeout.
- connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
- self.op.source_shutdown_timeout)
- timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
-
- assert iobj.primary_node == self.pnode.name
- disk_results = \
- masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
- self.source_x509_ca,
- self._cds, timeouts)
- if not compat.all(disk_results):
- # TODO: Should the instance still be started, even if some disks
- # failed to import (valid for local imports, too)?
- self.LogWarning("Some disks for instance %s on node %s were not"
- " imported successfully" % (instance, pnode_name))
+ else:
+ # also checked in the prereq part
+ raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
+ % self.op.mode)
# Run rename script on newly imported instance
assert iobj.name == instance
feedback_fn("Running rename script for %s" % instance)
result = self.rpc.call_instance_run_rename(pnode_name, iobj,
- self.source_instance_name,
+ rename_from,
self.op.debug_level)
if result.fail_msg:
self.LogWarning("Failed to run rename script for %s on node"
" %s: %s" % (instance, pnode_name, result.fail_msg))
- else:
- # also checked in the prereq part
- raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
- % self.op.mode)
-
assert not self.owned_locks(locking.LEVEL_NODE_RES)
if self.op.start:
self.lu.LogInfo("Checking disk/%d on %s", idx, node)
self.cfg.SetDiskID(dev, node)
- result = self.rpc.call_blockdev_find(node, dev)
+ result = _BlockdevFind(self, node, dev, instance)
if result.offline:
continue
_CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
ignore=self.ignore_ipolicy)
- # TODO: compute disk parameters
- primary_node_info = self.cfg.GetNodeInfo(instance.primary_node)
- secondary_node_info = self.cfg.GetNodeInfo(secondary_node)
- if primary_node_info.group != secondary_node_info.group:
- self.lu.LogInfo("The instance primary and secondary nodes are in two"
- " different node groups; the disk parameters of the"
- " primary node's group will be applied.")
-
- self.diskparams = self.cfg.GetNodeGroup(primary_node_info.group).diskparams
-
for node in check_nodes:
_CheckNodeOnline(self.lu, node)
self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
self.cfg.SetDiskID(dev, node)
- result = self.rpc.call_blockdev_find(node, dev)
+ result = _BlockdevFind(self, node, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
self.lu.LogInfo("Checking disk/%d consistency on node %s" %
(idx, node_name))
- if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
- ldisk=ldisk):
+ if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
+ on_primary, ldisk=ldisk):
raise errors.OpExecError("Node %s has degraded storage, unsafe to"
" replace disks for instance %s" %
(node_name, self.instance.name))
"""
iv_names = {}
- for idx, dev in enumerate(self.instance.disks):
+ disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+ for idx, dev in enumerate(disks):
if idx not in self.disks:
continue
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
names = _GenerateUniqueNames(self.lu, lv_names)
- _, data_p, meta_p = _ComputeLDParams(constants.DT_DRBD8, self.diskparams)
-
- vg_data = dev.children[0].logical_id[0]
+ (data_disk, meta_disk) = dev.children
+ vg_data = data_disk.logical_id[0]
lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
- logical_id=(vg_data, names[0]), params=data_p)
- vg_meta = dev.children[1].logical_id[0]
+ logical_id=(vg_data, names[0]),
+ params=data_disk.params)
+ vg_meta = meta_disk.logical_id[0]
lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
- logical_id=(vg_meta, names[1]), params=meta_p)
+ logical_id=(vg_meta, names[1]),
+ params=meta_disk.params)
new_lvs = [lv_data, lv_meta]
old_lvs = [child.Copy() for child in dev.children]
# we pass force_create=True to force the LVM creation
for new_lv in new_lvs:
- _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
- _GetInstanceInfoText(self.instance), False)
+ _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
+ _GetInstanceInfoText(self.instance), False)
return iv_names
for name, (dev, _, _) in iv_names.iteritems():
self.cfg.SetDiskID(dev, node_name)
- result = self.rpc.call_blockdev_find(node_name, dev)
+ result = _BlockdevFind(self, node_name, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
# Now that the new lvs have the old name, we can add them to the device
self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
- result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
- new_lvs)
+ result = self.rpc.call_blockdev_addchildren(self.target_node,
+ (dev, self.instance), new_lvs)
msg = result.fail_msg
if msg:
for new_lv in new_lvs:
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
- for idx, dev in enumerate(self.instance.disks):
+ disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+ for idx, dev in enumerate(disks):
self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
(self.new_node, idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
- _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
- _GetInstanceInfoText(self.instance), False)
+ _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
+ True, _GetInstanceInfoText(self.instance), False)
    # Step 4: drbd minors and drbd setup changes
# after this, we must manually remove the drbd minors on both the
iv_names[idx] = (dev, dev.children, new_net_id)
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
new_net_id)
- drbd_params, _, _ = _ComputeLDParams(constants.DT_DRBD8, self.diskparams)
new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
logical_id=new_alone_id,
children=dev.children,
size=dev.size,
- params=drbd_params)
+ params={})
+ (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
+ self.cfg)
try:
- _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
+ _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
+ anno_new_drbd,
_GetInstanceInfoText(self.instance), False)
except errors.GenericError:
self.cfg.ReleaseDRBDMinors(self.instance.name)
for idx, dev in enumerate(self.instance.disks):
self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
self.cfg.SetDiskID(dev, self.target_node)
- msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
+ msg = self.rpc.call_blockdev_shutdown(self.target_node,
+ (dev, self.instance)).fail_msg
if msg:
self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
"node: %s" % (idx, msg),
result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
self.new_node],
self.node_secondary_ip,
- self.instance.disks,
+ (self.instance.disks, self.instance),
self.instance.name,
False)
for to_node, to_result in result.items():
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- self.needed_locks[locking.LEVEL_NODE][:]
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
env = {
"DISK": self.op.disk,
"AMOUNT": self.op.amount,
+ "ABSOLUTE": self.op.absolute,
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
return env
self.disk = instance.FindDisk(self.op.disk)
+ if self.op.absolute:
+ self.target = self.op.amount
+ self.delta = self.target - self.disk.size
+ if self.delta < 0:
+ raise errors.OpPrereqError("Requested size (%s) is smaller than "
+ "current disk size (%s)" %
+ (utils.FormatUnit(self.target, "h"),
+ utils.FormatUnit(self.disk.size, "h")),
+ errors.ECODE_STATE)
+ else:
+ self.delta = self.op.amount
+ self.target = self.disk.size + self.delta
+ if self.delta < 0:
+ raise errors.OpPrereqError("Requested increment (%s) is negative" %
+ utils.FormatUnit(self.delta, "h"),
+ errors.ECODE_INVAL)
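+    # Worked example (illustrative): for a 10240 MiB disk, amount=15360 with
+    # absolute=True yields delta=5120, while amount=5120 with absolute=False
+    # yields the same target of 15360 MiB; a negative delta is rejected in
+    # both branches above.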
+
if instance.disk_template not in (constants.DT_FILE,
constants.DT_SHARED_FILE,
constants.DT_RBD):
    # TODO: check the free disk space for file, once that feature is
    # supported
_CheckNodesFreeDiskPerVG(self, nodenames,
- self.disk.ComputeGrowth(self.op.amount))
+ self.disk.ComputeGrowth(self.delta))
def Exec(self, feedback_fn):
"""Execute disk grow.
if not disks_ok:
raise errors.OpExecError("Cannot activate block device to grow")
- feedback_fn("Growing disk %s of instance '%s' by %s" %
+ feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
(self.op.disk, instance.name,
- utils.FormatUnit(self.op.amount, "h")))
+ utils.FormatUnit(self.delta, "h"),
+ utils.FormatUnit(self.target, "h")))
# First run all grow ops in dry-run mode
for node in instance.all_nodes:
self.cfg.SetDiskID(disk, node)
- result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
+ result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
+ True)
result.Raise("Grow request failed to node %s" % node)
# We know that (as far as we can test) operations across different
# nodes will succeed, time to run it for real
for node in instance.all_nodes:
self.cfg.SetDiskID(disk, node)
- result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
+ result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
+ False)
result.Raise("Grow request failed to node %s" % node)
# TODO: Rewrite code to work properly
      # sleeping for a short amount of time is a work-around.
time.sleep(5)
- disk.RecordGrow(self.op.amount)
+ disk.RecordGrow(self.delta)
self.cfg.Update(instance, feedback_fn)
# Changes have been recorded, release node lock
else:
self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+ self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
- if self.op.use_locking and level == locking.LEVEL_NODE:
- self._LockInstancesNodes()
+ if self.op.use_locking:
+ if level == locking.LEVEL_NODEGROUP:
+ owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+
+ # Lock all groups used by instances optimistically; this requires going
+ # via the node before it's locked, requiring verification later on
+ self.needed_locks[locking.LEVEL_NODEGROUP] = \
+ frozenset(group_uuid
+ for instance_name in owned_instances
+ for group_uuid in
+ self.cfg.GetInstanceNodeGroups(instance_name))
+
+ elif level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This only checks the optional instance list against the existing names.
"""
+ owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+
if self.wanted_names is None:
assert self.op.use_locking, "Locking was not used"
- self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
+ self.wanted_names = owned_instances
- self.wanted_instances = \
- map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
+ instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
+
+ if self.op.use_locking:
+ _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
+ None)
+ else:
+ assert not (owned_instances or owned_groups or owned_nodes)
- def _ComputeBlockdevStatus(self, node, instance_name, dev):
+ self.wanted_instances = instances.values()
+
+ def _ComputeBlockdevStatus(self, node, instance, dev):
"""Returns the status of a block device
"""
if result.offline:
return None
- result.Raise("Can't compute disk status for %s" % instance_name)
+ result.Raise("Can't compute disk status for %s" % instance.name)
status = result.payload
if status is None:
"""Compute block device status.
"""
+ (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
+
+ return self._ComputeDiskStatusInner(instance, snode, anno_dev)
+
+ def _ComputeDiskStatusInner(self, instance, snode, dev):
+ """Compute block device status.
+
+ @attention: The device has to be annotated already.
+
+ """
if dev.dev_type in constants.LDS_DRBD:
# we change the snode then (otherwise we use the one passed in)
if dev.logical_id[0] == instance.primary_node:
snode = dev.logical_id[0]
dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
- instance.name, dev)
- dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
+ instance, dev)
+ dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
if dev.children:
- dev_children = map(compat.partial(self._ComputeDiskStatus,
+ dev_children = map(compat.partial(self._ComputeDiskStatusInner,
instance, snode),
dev.children)
else:
cluster = self.cfg.GetClusterInfo()
- pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
- for i in self.wanted_instances)
- for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
- if self.op.static or pnode.offline:
+ node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+ nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
+
+ groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
+ for node in nodes.values()))
+
+ group2name_fn = lambda uuid: groups[uuid].name
+
+ for instance in self.wanted_instances:
+ pnode = nodes[instance.primary_node]
+
+ if self.op.static or pnode.offline:
remote_state = None
if pnode.offline:
self.LogWarning("Primary node %s is marked offline, returning static"
disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
instance.disks)
+ snodes_group_uuids = [nodes[snode_name].group
+ for snode_name in instance.secondary_nodes]
+
result[instance.name] = {
"name": instance.name,
"config_state": instance.admin_state,
"run_state": remote_state,
"pnode": instance.primary_node,
+ "pnode_group_uuid": pnode.group,
+ "pnode_group_name": group2name_fn(pnode.group),
"snodes": instance.secondary_nodes,
+ "snodes_group_uuids": snodes_group_uuids,
+ "snodes_group_names": map(group2name_fn, snodes_group_uuids),
"os": instance.os,
# this happens to be the same format used for hooks
"nics": _NICListToTuple(self, instance.nics),
return result
+def PrepareContainerMods(mods, private_fn):
+ """Prepares a list of container modifications by adding a private data field.
+
+ @type mods: list of tuples; (operation, index, parameters)
+ @param mods: List of modifications
+ @type private_fn: callable or None
+ @param private_fn: Callable for constructing a private data field for a
+ modification
+ @rtype: list
+
+ """
+ if private_fn is None:
+ fn = lambda: None
+ else:
+ fn = private_fn
+
+ return [(op, idx, params, fn()) for (op, idx, params) in mods]
+
+
+#: Type description for changes as returned by L{ApplyContainerMods}'s
+#: callbacks
+_TApplyContModsCbChanges = \
+ ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
+ ht.TNonEmptyString,
+ ht.TAny,
+ ])))
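+# (a callback may thus return either None or a list of two-item tuples,
+# e.g. [("disk.mode/0", "ro")], an illustrative value)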
+
+
+def ApplyContainerMods(kind, container, chgdesc, mods,
+ create_fn, modify_fn, remove_fn):
+ """Applies descriptions in C{mods} to C{container}.
+
+ @type kind: string
+ @param kind: One-word item description
+ @type container: list
+ @param container: Container to modify
+ @type chgdesc: None or list
+ @param chgdesc: List of applied changes
+ @type mods: list
+ @param mods: Modifications as returned by L{PrepareContainerMods}
+ @type create_fn: callable
+ @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
+ receives absolute item index, parameters and private data object as added
+ by L{PrepareContainerMods}, returns tuple containing new item and changes
+ as list
+ @type modify_fn: callable
+ @param modify_fn: Callback for modifying an existing item
+ (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
+ and private data object as added by L{PrepareContainerMods}, returns
+ changes as list
+ @type remove_fn: callable
+ @param remove_fn: Callback on removing item; receives absolute item index,
+ item and private data object as added by L{PrepareContainerMods}
+
+ """
+ for (op, idx, params, private) in mods:
+ if idx == -1:
+ # Append
+ absidx = len(container) - 1
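+      # (this absolute index is only used by the remove/modify branches
+      # below; the DDM_ADD branch recomputes its insertion point)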
+ elif idx < 0:
+ raise IndexError("Not accepting negative indices other than -1")
+ elif idx > len(container):
+ raise IndexError("Got %s index %s, but there are only %s" %
+ (kind, idx, len(container)))
+ else:
+ absidx = idx
+
+ changes = None
+
+ if op == constants.DDM_ADD:
+ # Calculate where item will be added
+ if idx == -1:
+ addidx = len(container)
+ else:
+ addidx = idx
+
+ if create_fn is None:
+ item = params
+ else:
+ (item, changes) = create_fn(addidx, params, private)
+
+ if idx == -1:
+ container.append(item)
+ else:
+ assert idx >= 0
+ assert idx <= len(container)
+ # list.insert does so before the specified index
+ container.insert(idx, item)
+ else:
+ # Retrieve existing item
+ try:
+ item = container[absidx]
+ except IndexError:
+ raise IndexError("Invalid %s index %s" % (kind, idx))
+
+ if op == constants.DDM_REMOVE:
+ assert not params
+
+ if remove_fn is not None:
+ remove_fn(absidx, item, private)
+
+ changes = [("%s/%s" % (kind, absidx), "remove")]
+
+ assert container[absidx] == item
+ del container[absidx]
+ elif op == constants.DDM_MODIFY:
+ if modify_fn is not None:
+ changes = modify_fn(absidx, item, params, private)
+ else:
+ raise errors.ProgrammerError("Unhandled operation '%s'" % op)
+
+ assert _TApplyContModsCbChanges(changes)
+
+ if not (chgdesc is None or changes is None):
+ chgdesc.extend(changes)
+
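+# Illustrative sketch (not part of this change) of how the two helpers above
+# cooperate, using a plain list and neither callbacks nor private data:
+#
+#   mods = PrepareContainerMods([(constants.DDM_ADD, -1, "third")], None)
+#   container = ["first", "second"]
+#   ApplyContainerMods("demo", container, None, mods, None, None, None)
+#   # container is now ["first", "second", "third"]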
+
+def _UpdateIvNames(base_index, disks):
+ """Updates the C{iv_name} attribute of disks.
+
+ @type disks: list of L{objects.Disk}
+
+ """
+ for (idx, disk) in enumerate(disks):
+ disk.iv_name = "disk/%s" % (base_index + idx, )
+
+
+class _InstNicModPrivate:
+ """Data structure for network interface modifications.
+
+ Used by L{LUInstanceSetParams}.
+
+ """
+ def __init__(self):
+ self.params = None
+ self.filled = None
+
+
class LUInstanceSetParams(LogicalUnit):
"""Modifies an instances's parameters.
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
+ @staticmethod
+ def _UpgradeDiskNicMods(kind, mods, verify_fn):
+ assert ht.TList(mods)
+ assert not mods or len(mods[0]) in (2, 3)
+
+ if mods and len(mods[0]) == 2:
+ result = []
+
+ addremove = 0
+ for op, params in mods:
+ if op in (constants.DDM_ADD, constants.DDM_REMOVE):
+ result.append((op, -1, params))
+ addremove += 1
+
+ if addremove > 1:
+ raise errors.OpPrereqError("Only one %s add or remove operation is"
+ " supported at a time" % kind,
+ errors.ECODE_INVAL)
+ else:
+ result.append((constants.DDM_MODIFY, op, params))
+
+ assert verify_fn(result)
+ else:
+ result = mods
+
+ return result
+
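+  # Example of the upgrade performed by _UpgradeDiskNicMods (illustrative):
+  # the legacy two-tuple form [(constants.DDM_ADD, {...})] becomes
+  # [(constants.DDM_ADD, -1, {...})], while the indexed form [(2, {...})]
+  # becomes [(constants.DDM_MODIFY, 2, {...})].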
+ @staticmethod
+ def _CheckMods(kind, mods, key_types, item_fn):
+ """Ensures requested disk/NIC modifications are valid.
+
+ """
+ for (op, _, params) in mods:
+ assert ht.TDict(params)
+
+ utils.ForceDictType(params, key_types)
+
+ if op == constants.DDM_REMOVE:
+ if params:
+ raise errors.OpPrereqError("No settings should be passed when"
+ " removing a %s" % kind,
+ errors.ECODE_INVAL)
+ elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
+ item_fn(op, params)
+ else:
+ raise errors.ProgrammerError("Unhandled operation '%s'" % op)
+
+ @staticmethod
+ def _VerifyDiskModification(op, params):
+ """Verifies a disk modification.
+
+ """
+ if op == constants.DDM_ADD:
+ mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
+ if mode not in constants.DISK_ACCESS_SET:
+ raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
+ errors.ECODE_INVAL)
+
+ size = params.get(constants.IDISK_SIZE, None)
+ if size is None:
+ raise errors.OpPrereqError("Required disk parameter '%s' missing" %
+ constants.IDISK_SIZE, errors.ECODE_INVAL)
+
+ try:
+ size = int(size)
+ except (TypeError, ValueError), err:
+ raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
+ errors.ECODE_INVAL)
+
+ params[constants.IDISK_SIZE] = size
+
+ elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
+ raise errors.OpPrereqError("Disk size change not possible, use"
+ " grow-disk", errors.ECODE_INVAL)
+
+ @staticmethod
+ def _VerifyNicModification(op, params):
+ """Verifies a network interface modification.
+
+ """
+ if op in (constants.DDM_ADD, constants.DDM_MODIFY):
+ ip = params.get(constants.INIC_IP, None)
+ if ip is None:
+ pass
+ elif ip.lower() == constants.VALUE_NONE:
+ params[constants.INIC_IP] = None
+ elif not netutils.IPAddress.IsValid(ip):
+ raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
+ errors.ECODE_INVAL)
+
+ bridge = params.get("bridge", None)
+ link = params.get(constants.INIC_LINK, None)
+ if bridge and link:
+ raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+ " at the same time", errors.ECODE_INVAL)
+ elif bridge and bridge.lower() == constants.VALUE_NONE:
+ params["bridge"] = None
+ elif link and link.lower() == constants.VALUE_NONE:
+ params[constants.INIC_LINK] = None
+
+ if op == constants.DDM_ADD:
+ macaddr = params.get(constants.INIC_MAC, None)
+ if macaddr is None:
+ params[constants.INIC_MAC] = constants.VALUE_AUTO
+
+ if constants.INIC_MAC in params:
+ macaddr = params[constants.INIC_MAC]
+ if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+ macaddr = utils.NormalizeAndValidateMac(macaddr)
+
+ if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
+ raise errors.OpPrereqError("'auto' is not a valid MAC address when"
+ " modifying an existing NIC",
+ errors.ECODE_INVAL)
+
def CheckArguments(self):
if not (self.op.nics or self.op.disks or self.op.disk_template or
self.op.hvparams or self.op.beparams or self.op.os_name or
- self.op.online_inst or self.op.offline_inst or
- self.op.runtime_mem):
+ self.op.offline is not None or self.op.runtime_mem):
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
_CheckGlobalHvParams(self.op.hvparams)
- # Disk validation
- disk_addremove = 0
- for disk_op, disk_dict in self.op.disks:
- utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
- if disk_op == constants.DDM_REMOVE:
- disk_addremove += 1
- continue
- elif disk_op == constants.DDM_ADD:
- disk_addremove += 1
- else:
- if not isinstance(disk_op, int):
- raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
- if not isinstance(disk_dict, dict):
- msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
- raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
- if disk_op == constants.DDM_ADD:
- mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
- if mode not in constants.DISK_ACCESS_SET:
- raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
- errors.ECODE_INVAL)
- size = disk_dict.get(constants.IDISK_SIZE, None)
- if size is None:
- raise errors.OpPrereqError("Required disk parameter size missing",
- errors.ECODE_INVAL)
- try:
- size = int(size)
- except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid disk size parameter: %s" %
- str(err), errors.ECODE_INVAL)
- disk_dict[constants.IDISK_SIZE] = size
- else:
- # modification of disk
- if constants.IDISK_SIZE in disk_dict:
- raise errors.OpPrereqError("Disk size change not possible, use"
- " grow-disk", errors.ECODE_INVAL)
+ self.op.disks = \
+ self._UpgradeDiskNicMods("disk", self.op.disks,
+ opcodes.OpInstanceSetParams.TestDiskModifications)
+ self.op.nics = \
+ self._UpgradeDiskNicMods("NIC", self.op.nics,
+ opcodes.OpInstanceSetParams.TestNicModifications)
- if disk_addremove > 1:
- raise errors.OpPrereqError("Only one disk add or remove operation"
- " supported at a time", errors.ECODE_INVAL)
+ # Check disk modifications
+ self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
+ self._VerifyDiskModification)
if self.op.disks and self.op.disk_template is not None:
raise errors.OpPrereqError("Disk template conversion and other disk"
" one requires specifying a secondary node",
errors.ECODE_INVAL)
- # NIC validation
- nic_addremove = 0
- for nic_op, nic_dict in self.op.nics:
- utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
- if nic_op == constants.DDM_REMOVE:
- nic_addremove += 1
- continue
- elif nic_op == constants.DDM_ADD:
- nic_addremove += 1
- else:
- if not isinstance(nic_op, int):
- raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
- if not isinstance(nic_dict, dict):
- msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
- raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
- # nic_dict should be a dict
- nic_ip = nic_dict.get(constants.INIC_IP, None)
- if nic_ip is not None:
- if nic_ip.lower() == constants.VALUE_NONE:
- nic_dict[constants.INIC_IP] = None
- else:
- if not netutils.IPAddress.IsValid(nic_ip):
- raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
- errors.ECODE_INVAL)
-
- nic_bridge = nic_dict.get("bridge", None)
- nic_link = nic_dict.get(constants.INIC_LINK, None)
- if nic_bridge and nic_link:
- raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
- " at the same time", errors.ECODE_INVAL)
- elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
- nic_dict["bridge"] = None
- elif nic_link and nic_link.lower() == constants.VALUE_NONE:
- nic_dict[constants.INIC_LINK] = None
-
- if nic_op == constants.DDM_ADD:
- nic_mac = nic_dict.get(constants.INIC_MAC, None)
- if nic_mac is None:
- nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
-
- if constants.INIC_MAC in nic_dict:
- nic_mac = nic_dict[constants.INIC_MAC]
- if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- nic_mac = utils.NormalizeAndValidateMac(nic_mac)
-
- if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
- raise errors.OpPrereqError("'auto' is not a valid MAC address when"
- " modifying an existing nic",
- errors.ECODE_INVAL)
-
- if nic_addremove > 1:
- raise errors.OpPrereqError("Only one NIC add or remove operation"
- " supported at a time", errors.ECODE_INVAL)
+ # Check NIC modifications
+ self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
+ self._VerifyNicModification)
def ExpandNames(self):
self._ExpandAndLockInstance()
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
+ # TODO: Acquire group lock in shared mode (disk parameters)
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
if self.op.disk_template and self.op.remote_node:
elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- self.needed_locks[locking.LEVEL_NODE][:]
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
args["vcpus"] = self.be_new[constants.BE_VCPUS]
# TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
# information at all.
- if self.op.nics:
- args["nics"] = []
- nic_override = dict(self.op.nics)
- for idx, nic in enumerate(self.instance.nics):
- if idx in nic_override:
- this_nic_override = nic_override[idx]
- else:
- this_nic_override = {}
- if constants.INIC_IP in this_nic_override:
- ip = this_nic_override[constants.INIC_IP]
- else:
- ip = nic.ip
- if constants.INIC_MAC in this_nic_override:
- mac = this_nic_override[constants.INIC_MAC]
- else:
- mac = nic.mac
- if idx in self.nic_pnew:
- nicparams = self.nic_pnew[idx]
- else:
- nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
- mode = nicparams[constants.NIC_MODE]
- link = nicparams[constants.NIC_LINK]
- args["nics"].append((ip, mac, mode, link))
- if constants.DDM_ADD in nic_override:
- ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
- mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
- nicparams = self.nic_pnew[constants.DDM_ADD]
+
+ if self._new_nics is not None:
+ nics = []
+
+ for nic in self._new_nics:
+ nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
mode = nicparams[constants.NIC_MODE]
link = nicparams[constants.NIC_LINK]
- args["nics"].append((ip, mac, mode, link))
- elif constants.DDM_REMOVE in nic_override:
- del args["nics"][-1]
+ nics.append((nic.ip, nic.mac, mode, link))
+
+ args["nics"] = nics
env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
if self.op.disk_template:
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
+ def _PrepareNicModification(self, params, private, old_ip, old_params,
+ cluster, pnode):
+ update_params_dict = dict([(key, params[key])
+ for key in constants.NICS_PARAMETERS
+ if key in params])
+
+ if "bridge" in params:
+ update_params_dict[constants.NIC_LINK] = params["bridge"]
+
+ new_params = _GetUpdatedParams(old_params, update_params_dict)
+ utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
+
+ new_filled_params = cluster.SimpleFillNIC(new_params)
+ objects.NIC.CheckParameterSyntax(new_filled_params)
+
+ new_mode = new_filled_params[constants.NIC_MODE]
+ if new_mode == constants.NIC_MODE_BRIDGED:
+ bridge = new_filled_params[constants.NIC_LINK]
+ msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
+ if msg:
+ msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
+ if self.op.force:
+ self.warn.append(msg)
+ else:
+ raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
+
+ elif new_mode == constants.NIC_MODE_ROUTED:
+ ip = params.get(constants.INIC_IP, old_ip)
+ if ip is None:
+ raise errors.OpPrereqError("Cannot set the NIC IP address to None"
+ " on a routed NIC", errors.ECODE_INVAL)
+
+ if constants.INIC_MAC in params:
+ mac = params[constants.INIC_MAC]
+ if mac is None:
+ raise errors.OpPrereqError("Cannot unset the NIC MAC address",
+ errors.ECODE_INVAL)
+ elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+ # otherwise generate the MAC address
+ params[constants.INIC_MAC] = \
+ self.cfg.GenerateMAC(self.proc.GetECId())
+ else:
+ # or validate/reserve the current one
+ try:
+ self.cfg.ReserveMAC(mac, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("MAC address '%s' already in use"
+ " in cluster" % mac,
+ errors.ECODE_NOTUNIQUE)
+
+ private.params = new_params
+ private.filled = new_filled_params
+
def CheckPrereq(self):
"""Check prerequisites.
pnode = instance.primary_node
nodelist = list(instance.all_nodes)
pnode_info = self.cfg.GetNodeInfo(pnode)
- self.diskparams = self.cfg.GetNodeGroup(pnode_info.group).diskparams
+ self.diskparams = self.cfg.GetInstanceDiskParams(instance)
+
+ # Prepare disk/NIC modifications
+ self.diskmod = PrepareContainerMods(self.op.disks, None)
+ self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
# OS change
if self.op.os_name and not self.op.force:
else:
instance_os = instance.os
+ assert not (self.op.disk_template and self.op.disks), \
+ "Can't modify disk template and apply disk changes at the same time"
+
if self.op.disk_template:
if instance.disk_template == self.op.disk_template:
raise errors.OpPrereqError("Instance already has disk template %s" %
self.be_proposed = cluster.SimpleFillBE(instance.beparams)
be_old = cluster.FillBE(instance)
- # CPU param validation -- checking every time a paramtere is
+ # CPU param validation -- checking every time a parameter is
# changed to cover all cases where either CPU mask or vcpus have
# changed
if (constants.BE_VCPUS in self.be_proposed and
self.op.memory - current_memory,
instance.hypervisor)
- # NIC processing
- self.nic_pnew = {}
- self.nic_pinst = {}
- for nic_op, nic_dict in self.op.nics:
- if nic_op == constants.DDM_REMOVE:
- if not instance.nics:
- raise errors.OpPrereqError("Instance has no NICs, cannot remove",
- errors.ECODE_INVAL)
- continue
- if nic_op != constants.DDM_ADD:
- # an existing nic
- if not instance.nics:
- raise errors.OpPrereqError("Invalid NIC index %s, instance has"
- " no NICs" % nic_op,
- errors.ECODE_INVAL)
- if nic_op < 0 or nic_op >= len(instance.nics):
- raise errors.OpPrereqError("Invalid NIC index %s, valid values"
- " are 0 to %d" %
- (nic_op, len(instance.nics) - 1),
- errors.ECODE_INVAL)
- old_nic_params = instance.nics[nic_op].nicparams
- old_nic_ip = instance.nics[nic_op].ip
- else:
- old_nic_params = {}
- old_nic_ip = None
-
- update_params_dict = dict([(key, nic_dict[key])
- for key in constants.NICS_PARAMETERS
- if key in nic_dict])
-
- if "bridge" in nic_dict:
- update_params_dict[constants.NIC_LINK] = nic_dict["bridge"]
-
- new_nic_params = _GetUpdatedParams(old_nic_params,
- update_params_dict)
- utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
- new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
- objects.NIC.CheckParameterSyntax(new_filled_nic_params)
- self.nic_pinst[nic_op] = new_nic_params
- self.nic_pnew[nic_op] = new_filled_nic_params
- new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
-
- if new_nic_mode == constants.NIC_MODE_BRIDGED:
- nic_bridge = new_filled_nic_params[constants.NIC_LINK]
- msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
- if msg:
- msg = "Error checking bridges on node %s: %s" % (pnode, msg)
- if self.op.force:
- self.warn.append(msg)
- else:
- raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
- if new_nic_mode == constants.NIC_MODE_ROUTED:
- if constants.INIC_IP in nic_dict:
- nic_ip = nic_dict[constants.INIC_IP]
- else:
- nic_ip = old_nic_ip
- if nic_ip is None:
- raise errors.OpPrereqError("Cannot set the nic ip to None"
- " on a routed nic", errors.ECODE_INVAL)
- if constants.INIC_MAC in nic_dict:
- nic_mac = nic_dict[constants.INIC_MAC]
- if nic_mac is None:
- raise errors.OpPrereqError("Cannot set the nic mac to None",
- errors.ECODE_INVAL)
- elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- # otherwise generate the mac
- nic_dict[constants.INIC_MAC] = \
- self.cfg.GenerateMAC(self.proc.GetECId())
- else:
- # or validate/reserve the current one
- try:
- self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
- except errors.ReservationError:
- raise errors.OpPrereqError("MAC address %s already in use"
- " in cluster" % nic_mac,
- errors.ECODE_NOTUNIQUE)
-
- # DISK processing
if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Disk operations not supported for"
" diskless instances",
errors.ECODE_INVAL)
- for disk_op, _ in self.op.disks:
- if disk_op == constants.DDM_REMOVE:
- if len(instance.disks) == 1:
- raise errors.OpPrereqError("Cannot remove the last disk of"
- " an instance", errors.ECODE_INVAL)
- _CheckInstanceState(self, instance, INSTANCE_DOWN,
- msg="cannot remove disks")
-
- if (disk_op == constants.DDM_ADD and
- len(instance.disks) >= constants.MAX_DISKS):
- raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
- " add more" % constants.MAX_DISKS,
- errors.ECODE_STATE)
- if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
- # an existing disk
- if disk_op < 0 or disk_op >= len(instance.disks):
- raise errors.OpPrereqError("Invalid disk index %s, valid values"
- " are 0 to %d" %
- (disk_op, len(instance.disks)),
- errors.ECODE_INVAL)
- # disabling the instance
- if self.op.offline_inst:
- _CheckInstanceState(self, instance, INSTANCE_DOWN,
- msg="cannot change instance state to offline")
+ def _PrepareNicCreate(_, params, private):
+ self._PrepareNicModification(params, private, None, {}, cluster, pnode)
+ return (None, None)
+
+ def _PrepareNicMod(_, nic, params, private):
+ self._PrepareNicModification(params, private, nic.ip,
+ nic.nicparams, cluster, pnode)
+ return None
+
+    # Verify NIC changes (operating on a copy)
+ nics = instance.nics[:]
+ ApplyContainerMods("NIC", nics, None, self.nicmod,
+ _PrepareNicCreate, _PrepareNicMod, None)
+ if len(nics) > constants.MAX_NICS:
+ raise errors.OpPrereqError("Instance has too many network interfaces"
+ " (%d), cannot add more" % constants.MAX_NICS,
+ errors.ECODE_STATE)
+
+ # Verify disk changes (operating on a copy)
+ disks = instance.disks[:]
+ ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
+ if len(disks) > constants.MAX_DISKS:
+ raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
+ " more" % constants.MAX_DISKS,
+ errors.ECODE_STATE)
- # enabling the instance
- if self.op.online_inst:
- _CheckInstanceState(self, instance, INSTANCE_OFFLINE,
- msg="cannot make instance go online")
+ if self.op.offline is not None:
+ if self.op.offline:
+ msg = "can't change to offline"
+ else:
+ msg = "can't change to online"
+ _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
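+      # i.e. the instance must be stopped (or already marked offline) before
+      # the offline flag may be changed in either direction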
+
+ # Pre-compute NIC changes (necessary to use result in hooks)
+ self._nic_chgdesc = []
+ if self.nicmod:
+ # Operate on copies as this is still in prereq
+ nics = [nic.Copy() for nic in instance.nics]
+ ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
+ self._CreateNewNic, self._ApplyNicMods, None)
+ self._new_nics = nics
+ else:
+ self._new_nics = None
def _ConvertPlainToDrbd(self, feedback_fn):
"""Converts an instance from plain to drbd.
instance.name, pnode, [snode],
disk_info, None, None, 0, feedback_fn,
self.diskparams)
+ anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
+ self.diskparams)
info = _GetInstanceInfoText(instance)
- feedback_fn("Creating aditional volumes...")
+ feedback_fn("Creating additional volumes...")
# first, create the missing data and meta devices
- for disk in new_disks:
+ for disk in anno_disks:
# unfortunately this is... not too nice
_CreateSingleBlockDev(self, pnode, instance, disk.children[1],
info, True)
feedback_fn("Initializing DRBD devices...")
# all child devices are in place, we can now create the DRBD devices
- for disk in new_disks:
+ for disk in anno_disks:
for node in [pnode, snode]:
f_create = node == pnode
_CreateSingleBlockDev(self, node, instance, disk, info, f_create)
snode = instance.secondary_nodes[0]
feedback_fn("Converting template to plain")
- old_disks = instance.disks
- new_disks = [d.children[0] for d in old_disks]
+ old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
+ new_disks = [d.children[0] for d in instance.disks]
# copy over size and mode
for parent, child in zip(old_disks, new_disks):
child.size = parent.size
child.mode = parent.mode
+ # this is a DRBD disk, return its port to the pool
+ # NOTE: this must be done right before the call to cfg.Update!
+ for disk in old_disks:
+ tcp_port = disk.logical_id[2]
+ self.cfg.AddTcpUdpPort(tcp_port)
+
# update instance structure
instance.disks = new_disks
instance.disk_template = constants.DT_PLAIN
self.LogWarning("Could not remove metadata for disk %d on node %s,"
" continuing anyway: %s", idx, pnode, msg)
- # this is a DRBD disk, return its port to the pool
- for disk in old_disks:
- tcp_port = disk.logical_id[2]
- self.cfg.AddTcpUdpPort(tcp_port)
+ def _CreateNewDisk(self, idx, params, _):
+ """Creates a new disk.
- # Node resource locks will be released by caller
+ """
+ instance = self.instance
+
+ # add a new disk
+ if instance.disk_template in constants.DTS_FILEBASED:
+ (file_driver, file_path) = instance.disks[0].logical_id
+ file_path = os.path.dirname(file_path)
+ else:
+ file_driver = file_path = None
+
+ disk = \
+ _GenerateDiskTemplate(self, instance.disk_template, instance.name,
+ instance.primary_node, instance.secondary_nodes,
+ [params], file_path, file_driver, idx,
+ self.Log, self.diskparams)[0]
+
+ info = _GetInstanceInfoText(instance)
+
+ logging.info("Creating volume %s for instance %s",
+ disk.iv_name, instance.name)
+ # Note: this needs to be kept in sync with _CreateDisks
+ #HARDCODE
+ for node in instance.all_nodes:
+ f_create = (node == instance.primary_node)
+ try:
+ _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
+ except errors.OpExecError, err:
+ self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
+ disk.iv_name, disk, node, err)
+
+ return (disk, [
+ ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
+ ])
+
+ @staticmethod
+ def _ModifyDisk(idx, disk, params, _):
+ """Modifies a disk.
+
+ """
+ disk.mode = params[constants.IDISK_MODE]
+
+ return [
+ ("disk.mode/%d" % idx, disk.mode),
+ ]
+
+ def _RemoveDisk(self, idx, root, _):
+ """Removes a disk.
+
+ """
+ (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
+ for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
+ self.cfg.SetDiskID(disk, node)
+ msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
+ if msg:
+ self.LogWarning("Could not remove disk/%d on node '%s': %s,"
+ " continuing anyway", idx, node, msg)
+
+ # if this is a DRBD disk, return its port to the pool
+ if root.dev_type in constants.LDS_DRBD:
+ self.cfg.AddTcpUdpPort(root.logical_id[2])
+
+ @staticmethod
+ def _CreateNewNic(idx, params, private):
+ """Creates data structure for a new network interface.
+
+ """
+ mac = params[constants.INIC_MAC]
+ ip = params.get(constants.INIC_IP, None)
+ nicparams = private.params
+
+ return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
+ ("nic.%d" % idx,
+ "add:mac=%s,ip=%s,mode=%s,link=%s" %
+ (mac, ip, private.filled[constants.NIC_MODE],
+ private.filled[constants.NIC_LINK])),
+ ])
+
+ @staticmethod
+ def _ApplyNicMods(idx, nic, params, private):
+ """Modifies a network interface.
+
+ """
+ changes = []
+
+ for key in [constants.INIC_MAC, constants.INIC_IP]:
+ if key in params:
+ changes.append(("nic.%s/%d" % (key, idx), params[key]))
+ setattr(nic, key, params[key])
+
+ if private.params:
+ nic.nicparams = private.params
+
+ for (key, val) in params.items():
+ changes.append(("nic.%s/%d" % (key, idx), val))
+
+ return changes
def Exec(self, feedback_fn):
"""Modifies an instance.
"""
# Process here the warnings from CheckPrereq, as we don't have a
# feedback_fn there.
+ # TODO: Replace with self.LogWarning
for warn in self.warn:
feedback_fn("WARNING: %s" % warn)
rpcres.Raise("Cannot modify instance runtime memory")
result.append(("runtime_memory", self.op.runtime_mem))
- # disk changes
- for disk_op, disk_dict in self.op.disks:
- if disk_op == constants.DDM_REMOVE:
- # remove the last disk
- device = instance.disks.pop()
- device_idx = len(instance.disks)
- for node, disk in device.ComputeNodeTree(instance.primary_node):
- self.cfg.SetDiskID(disk, node)
- msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
- if msg:
- self.LogWarning("Could not remove disk/%d on node %s: %s,"
- " continuing anyway", device_idx, node, msg)
- result.append(("disk/%d" % device_idx, "remove"))
-
- # if this is a DRBD disk, return its port to the pool
- if device.dev_type in constants.LDS_DRBD:
- tcp_port = device.logical_id[2]
- self.cfg.AddTcpUdpPort(tcp_port)
- elif disk_op == constants.DDM_ADD:
- # add a new disk
- if instance.disk_template in (constants.DT_FILE,
- constants.DT_SHARED_FILE):
- file_driver, file_path = instance.disks[0].logical_id
- file_path = os.path.dirname(file_path)
- else:
- file_driver = file_path = None
- disk_idx_base = len(instance.disks)
- new_disk = _GenerateDiskTemplate(self,
- instance.disk_template,
- instance.name, instance.primary_node,
- instance.secondary_nodes,
- [disk_dict],
- file_path,
- file_driver,
- disk_idx_base,
- feedback_fn,
- self.diskparams)[0]
- instance.disks.append(new_disk)
- info = _GetInstanceInfoText(instance)
-
- logging.info("Creating volume %s for instance %s",
- new_disk.iv_name, instance.name)
- # Note: this needs to be kept in sync with _CreateDisks
- #HARDCODE
- for node in instance.all_nodes:
- f_create = node == instance.primary_node
- try:
- _CreateBlockDev(self, node, instance, new_disk,
- f_create, info, f_create)
- except errors.OpExecError, err:
- self.LogWarning("Failed to create volume %s (%s) on"
- " node %s: %s",
- new_disk.iv_name, new_disk, node, err)
- result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
- (new_disk.size, new_disk.mode)))
- else:
- # change a given disk
- instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
- result.append(("disk.mode/%d" % disk_op,
- disk_dict[constants.IDISK_MODE]))
+ # Apply disk changes
+ ApplyContainerMods("disk", instance.disks, result, self.diskmod,
+ self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
+ _UpdateIvNames(0, instance.disks)
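+    # Renumber iv_names so they are contiguous again, e.g. after removing
+    # disk/0 the remaining disks are renamed starting from "disk/0"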
if self.op.disk_template:
if __debug__:
_ReleaseLocks(self, locking.LEVEL_NODE)
_ReleaseLocks(self, locking.LEVEL_NODE_RES)
- # NIC changes
- for nic_op, nic_dict in self.op.nics:
- if nic_op == constants.DDM_REMOVE:
- # remove the last nic
- del instance.nics[-1]
- result.append(("nic.%d" % len(instance.nics), "remove"))
- elif nic_op == constants.DDM_ADD:
- # mac and bridge should be set, by now
- mac = nic_dict[constants.INIC_MAC]
- ip = nic_dict.get(constants.INIC_IP, None)
- nicparams = self.nic_pinst[constants.DDM_ADD]
- new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
- instance.nics.append(new_nic)
- result.append(("nic.%d" % (len(instance.nics) - 1),
- "add:mac=%s,ip=%s,mode=%s,link=%s" %
- (new_nic.mac, new_nic.ip,
- self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
- self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
- )))
- else:
- for key in (constants.INIC_MAC, constants.INIC_IP):
- if key in nic_dict:
- setattr(instance.nics[nic_op], key, nic_dict[key])
- if nic_op in self.nic_pinst:
- instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
- for key, val in nic_dict.iteritems():
- result.append(("nic.%s/%d" % (key, nic_op), val))
+ # Apply NIC changes
+ if self._new_nics is not None:
+ instance.nics = self._new_nics
+ result.extend(self._nic_chgdesc)
# hvparams changes
if self.op.hvparams:
for key, val in self.op.osparams.iteritems():
result.append(("os/%s" % key, val))
- # online/offline instance
- if self.op.online_inst:
- self.cfg.MarkInstanceDown(instance.name)
- result.append(("admin_state", constants.ADMINST_DOWN))
- if self.op.offline_inst:
+ if self.op.offline is None:
+ # Ignore
+ pass
+ elif self.op.offline:
+ # Mark instance as offline
self.cfg.MarkInstanceOffline(instance.name)
result.append(("admin_state", constants.ADMINST_OFFLINE))
+ else:
+ # Mark instance as online, but stopped
+ self.cfg.MarkInstanceDown(instance.name)
+ result.append(("admin_state", constants.ADMINST_DOWN))
self.cfg.Update(instance, feedback_fn)
if self.req_target_uuids:
# User requested specific target groups
- self.target_uuids = self.req_target_uuids
+ self.target_uuids = frozenset(self.req_target_uuids)
else:
# All groups except those used by the instance are potential targets
self.target_uuids = owned_groups - inst_groups
"""
REQ_BGL = False
+ def CheckArguments(self):
+ self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
+ ["node", "export"], self.op.use_locking)
+
def ExpandNames(self):
- self.needed_locks = {}
- self.share_locks[locking.LEVEL_NODE] = 1
- if not self.op.nodes:
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
- else:
- self.needed_locks[locking.LEVEL_NODE] = \
- _GetWantedNodes(self, self.op.nodes)
+ self.expq.ExpandNames(self)
+
+ def DeclareLocks(self, level):
+ self.expq.DeclareLocks(self, level)
def Exec(self, feedback_fn):
- """Compute the list of all the exported system images.
+ result = {}
- @rtype: dict
- @return: a dictionary with the structure node->(export-list)
- where export-list is a list of the instances exported on
- that node.
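+    # Build the old-style result: each node maps either to False (the export
+    # list could not be retrieved) or to the list of export names found on
+    # it, e.g. {"node1": ["inst1.export"], "node2": False} (illustrative).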
+ for (node, expname) in self.expq.OldStyleQuery(self):
+ if expname is None:
+ result[node] = False
+ else:
+ result.setdefault(node, []).append(expname)
+
+ return result
+
+
+class _ExportQuery(_QueryBase):
+ FIELDS = query.EXPORT_FIELDS
+
+ #: The node name is not a unique key for this query
+ SORT_FIELD = "node"
+
+ def ExpandNames(self, lu):
+ lu.needed_locks = {}
+
+ # The following variables interact with _QueryBase._GetNames
+ if self.names:
+ self.wanted = _GetWantedNodes(lu, self.names)
+ else:
+ self.wanted = locking.ALL_SET
+
+ self.do_locking = self.use_locking
+
+ if self.do_locking:
+ lu.share_locks = _ShareAll()
+ lu.needed_locks = {
+ locking.LEVEL_NODE: self.wanted,
+ }
+
+ def DeclareLocks(self, lu, level):
+ pass
+
+ def _GetQueryData(self, lu):
+ """Computes the list of nodes and their attributes.
"""
- self.nodes = self.owned_locks(locking.LEVEL_NODE)
- rpcresult = self.rpc.call_export_list(self.nodes)
- result = {}
- for node in rpcresult:
- if rpcresult[node].fail_msg:
- result[node] = False
+ # Locking is not used
+ # TODO
+ assert not (compat.any(lu.glm.is_owned(level)
+ for level in locking.LEVELS
+ if level != locking.LEVEL_CLUSTER) or
+ self.do_locking or self.use_locking)
+
+ nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
+
+ result = []
+
+ for (node, nres) in lu.rpc.call_export_list(nodes).items():
+ if nres.fail_msg:
+ result.append((node, None))
else:
- result[node] = rpcresult[node].payload
+ result.extend((node, expname) for expname in nres.payload)
return result
if self.op.diskparams:
for templ in constants.DISK_TEMPLATES:
- if templ not in self.op.diskparams:
- self.op.diskparams[templ] = {}
- utils.ForceDictType(self.op.diskparams[templ], constants.DISK_DT_TYPES)
+ if templ in self.op.diskparams:
+ utils.ForceDictType(self.op.diskparams[templ],
+ constants.DISK_DT_TYPES)
+ self.new_diskparams = self.op.diskparams
+ try:
+ utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
+ except errors.OpPrereqError, err:
+ raise errors.OpPrereqError("While verify diskparams options: %s" % err,
+ errors.ECODE_INVAL)
else:
- self.op.diskparams = self.cfg.GetClusterInfo().diskparams
+ self.new_diskparams = {}
if self.op.ipolicy:
cluster = self.cfg.GetClusterInfo()
full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
try:
- objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
+ objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
except errors.ConfigurationError, err:
raise errors.OpPrereqError("Invalid instance policy: %s" % err,
errors.ECODE_INVAL)
uuid=self.group_uuid,
alloc_policy=self.op.alloc_policy,
ndparams=self.op.ndparams,
- diskparams=self.op.diskparams,
+ diskparams=self.new_diskparams,
ipolicy=self.op.ipolicy,
hv_state_static=self.new_hv_state,
disk_state_static=self.new_disk_state)
return query.GroupQueryData(self._cluster,
[self._all_groups[uuid]
for uuid in self.wanted],
- group_to_nodes, group_to_instances)
+ group_to_nodes, group_to_instances,
+ query.GQ_DISKPARAMS in self.requested_data)
class LUGroupQuery(NoHooksLU):
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
+ @staticmethod
+ def _UpdateAndVerifyDiskParams(old, new):
+ """Updates and verifies disk parameters.
+
+ """
+ new_params = _GetUpdatedParams(old, new)
+ utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
+ return new_params
+
def CheckPrereq(self):
"""Check prerequisites.
if self.op.ndparams:
new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
- utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+ utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
self.new_ndparams = new_ndparams
if self.op.diskparams:
- self.new_diskparams = dict()
- for templ in constants.DISK_TEMPLATES:
- if templ not in self.op.diskparams:
- self.op.diskparams[templ] = {}
- new_templ_params = _GetUpdatedParams(self.group.diskparams[templ],
- self.op.diskparams[templ])
- utils.ForceDictType(new_templ_params, constants.DISK_DT_TYPES)
- self.new_diskparams[templ] = new_templ_params
+ diskparams = self.group.diskparams
+ uavdp = self._UpdateAndVerifyDiskParams
+      # For each disk template, update and verify the values of its subdict
+ new_diskparams = dict((dt,
+ uavdp(diskparams.get(dt, {}),
+ self.op.diskparams[dt]))
+ for dt in constants.DISK_TEMPLATES
+ if dt in self.op.diskparams)
+      # Now that all the disk-template subdicts are ready, merge the
+      # updated subdicts into the actual diskparams dict
+ self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
+ try:
+ utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
+ except errors.OpPrereqError, err:
+ raise errors.OpPrereqError("While verify diskparams options: %s" % err,
+ errors.ECODE_INVAL)
if self.op.hv_state:
self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
# Check if node groups for locked instances are still correct
- for instance_name in owned_instances:
- inst = self.instances[instance_name]
- assert owned_nodes.issuperset(inst.all_nodes), \
- "Instance %s's nodes changed while we kept the lock" % instance_name
-
- inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
- owned_groups)
-
- assert self.group_uuid in inst_groups, \
- "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
+ _CheckInstancesNodeGroups(self.cfg, self.instances,
+ owned_groups, owned_nodes, self.group_uuid)
if self.req_target_uuids:
# User requested specific target groups
def ExpandNames(self):
self.group_uuid = None
self.needed_locks = {}
+
if self.op.kind == constants.TAG_NODE:
self.op.name = _ExpandNodeName(self.cfg, self.op.name)
- self.needed_locks[locking.LEVEL_NODE] = self.op.name
+ lock_level = locking.LEVEL_NODE
+ lock_name = self.op.name
elif self.op.kind == constants.TAG_INSTANCE:
self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
- self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
+ lock_level = locking.LEVEL_INSTANCE
+ lock_name = self.op.name
elif self.op.kind == constants.TAG_NODEGROUP:
self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
+ lock_level = locking.LEVEL_NODEGROUP
+ lock_name = self.group_uuid
+ else:
+ lock_level = None
+ lock_name = None
+
+ if lock_level and getattr(self.op, "use_locking", True):
+ self.needed_locks[lock_level] = lock_name
# FIXME: Acquire BGL for cluster tag operations (as of this writing it's
# not possible to acquire the BGL based on opcode parameters)
self.in_text = self.out_text = self.in_data = self.out_data = None
# init all input fields so that pylint is happy
self.mode = mode
- self.memory = self.disks = self.disk_template = None
+ self.memory = self.disks = self.disk_template = self.spindle_use = None
self.os = self.tags = self.nics = self.vcpus = None
self.hypervisor = None
self.relocate_from = None
"cluster_name": cfg.GetClusterName(),
"cluster_tags": list(cluster_info.GetTags()),
"enabled_hypervisors": list(cluster_info.enabled_hypervisors),
- # we don't have job IDs
+ "ipolicy": cluster_info.ipolicy,
}
ninfo = cfg.GetAllNodesInfo()
iinfo = cfg.GetAllInstancesInfo().values()
data["nodegroups"] = self._ComputeNodeGroupData(cfg)
- config_ndata = self._ComputeBasicNodeData(ninfo)
+ config_ndata = self._ComputeBasicNodeData(cfg, ninfo)
data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
i_list, config_ndata)
assert len(data["nodes"]) == len(ninfo), \
return ng
@staticmethod
- def _ComputeBasicNodeData(node_cfg):
+ def _ComputeBasicNodeData(cfg, node_cfg):
"""Compute global node data.
@rtype: dict
"group": ninfo.group,
"master_capable": ninfo.master_capable,
"vm_capable": ninfo.vm_capable,
+ "ndparams": cfg.GetNdParams(ninfo),
})
for ninfo in node_cfg.values())
"admin_state": iinfo.admin_state,
"vcpus": beinfo[constants.BE_VCPUS],
"memory": beinfo[constants.BE_MAXMEM],
+ "spindle_use": beinfo[constants.BE_SPINDLE_USE],
"os": iinfo.os,
"nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
"nics": nic_data,
"os": self.os,
"vcpus": self.vcpus,
"memory": self.memory,
+ "spindle_use": self.spindle_use,
"disks": self.disks,
"disk_space_total": disk_space,
"nics": self.nics,
[
("name", ht.TString),
("memory", ht.TInt),
+ ("spindle_use", ht.TInt),
("disks", ht.TListOf(ht.TDict)),
("disk_template", ht.TString),
("os", ht.TString),
nics=self.op.nics,
vcpus=self.op.vcpus,
hypervisor=self.op.hypervisor,
+ spindle_use=self.op.spindle_use,
)
elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
ial = IAllocator(self.cfg, self.rpc,
#: Query type implementations
_QUERY_IMPL = {
+ constants.QR_CLUSTER: _ClusterQuery,
constants.QR_INSTANCE: _InstanceQuery,
constants.QR_NODE: _NodeQuery,
constants.QR_GROUP: _GroupQuery,
constants.QR_OS: _OsQuery,
+ constants.QR_EXPORT: _ExportQuery,
}
assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP