raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-def _CheckNodeOnline(lu, node):
+def _CheckNodeOnline(lu, node, msg=None):
"""Ensure that a given node is online.
@param lu: the LU on behalf of which we make the check
@param node: the node to check
+ @param msg: if passed, should be a message to replace the default one
@raise errors.OpPrereqError: if the node is offline
"""
+ if msg is None:
+ msg = "Can't use offline node"
if lu.cfg.GetNodeInfo(node).offline:
- raise errors.OpPrereqError("Can't use offline node %s" % node,
- errors.ECODE_STATE)
+ raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
def _CheckNodeNotDrained(lu, node):
ht.TNone)),
("hvparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone)),
- ("beparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
- ht.TNone)),
+ ("beparams", None, ht.TOr(ht.TDict, ht.TNone)),
("os_hvp", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone)),
("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
("maintain_node_health", None, ht.TMaybeBool),
("prealloc_wipe_disks", None, ht.TMaybeBool),
("nicparams", None, ht.TOr(ht.TDict, ht.TNone)),
+ ("ndparams", None, ht.TOr(ht.TDict, ht.TNone)),
("drbd_helper", None, ht.TOr(ht.TString, ht.TNone)),
("default_iallocator", None, ht.TOr(ht.TString, ht.TNone)),
("reserved_lvs", None, ht.TOr(ht.TListOf(ht.TNonEmptyString), ht.TNone)),
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
+ if self.op.ndparams:
+ utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+ self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
+
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
if self.op.osparams:
self.cluster.osparams = self.new_osp
+ if self.op.ndparams:
+ self.cluster.ndparams = self.new_ndparams
if self.op.candidate_pool_size is not None:
self.cluster.candidate_pool_size = self.op.candidate_pool_size
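
For orientation, a minimal standalone sketch of the validate-then-fill pattern the new ndparams handling follows (ForceDictType in CheckPrereq, SimpleFillND to overlay the cluster defaults, assignment in Exec). The parameter name "spindle_count", the type table and the plain-dict merge below are illustrative stand-ins only, not the real NDS_PARAMETER_TYPES or SimpleFillND:

# Hypothetical node parameter types; the real set lives in constants.NDS_PARAMETER_TYPES.
allowed_types = {"spindle_count": int}
cluster_defaults = {"spindle_count": 1}      # stands in for cluster.ndparams
submitted = {"spindle_count": 4}             # stands in for self.op.ndparams

# Simplified ForceDictType: reject unknown keys or badly typed values.
for key, val in submitted.items():
  if key not in allowed_types or not isinstance(val, allowed_types[key]):
    raise ValueError("Invalid node parameter %s=%r" % (key, val))

# Simplified SimpleFillND: overlay the submitted values on the cluster defaults.
new_ndparams = dict(cluster_defaults)
new_ndparams.update(submitted)
assert new_ndparams == {"spindle_count": 4}
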
for key, val in mods:
if key == constants.DDM_ADD:
if val in lst:
- feedback_fn("OS %s already in %s, ignoring", val, desc)
+ feedback_fn("OS %s already in %s, ignoring" % (val, desc))
else:
lst.append(val)
elif key == constants.DDM_REMOVE:
if val in lst:
lst.remove(val)
else:
- feedback_fn("OS %s not found in %s, ignoring", val, desc)
+ feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
else:
raise errors.ProgrammerError("Invalid modification '%s'" % key)
"pinst_cnt", "sinst_cnt",
"pinst_list", "sinst_list",
"pip", "sip", "tags",
- "master",
- "role"] + _SIMPLE_FIELDS
+ "master", "role",
+ "group.uuid", "group",
+ ] + _SIMPLE_FIELDS
)
def CheckArguments(self):
nodenames = utils.NiceSort(nodenames)
nodelist = [all_info[name] for name in nodenames]
+ if "group" in self.op.output_fields:
+ groups = self.cfg.GetAllNodeGroupsInfo()
+ else:
+ groups = {}
+
# begin data gathering
if self.do_node_query:
val = "O"
else:
val = "R"
+ elif field == "group.uuid":
+ val = node.group
+ elif field == "group":
+ ng = groups.get(node.group, None)
+ if ng is None:
+ val = "<unknown>"
+ else:
+ val = ng.name
else:
raise errors.ParameterError(field)
node_output.append(val)
"""
_OP_PARAMS = [
+ _POutputFields,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
- ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
]
REQ_BGL = False
_FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
"""
_FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
_OP_PARAMS = [
+ _POutputFields,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
("storage_type", ht.NoDefault, _CheckStorageType),
- ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
("name", None, ht.TMaybeString),
]
REQ_BGL = False
("group", None, ht.TMaybeString),
("master_capable", None, ht.TMaybeBool),
("vm_capable", None, ht.TMaybeBool),
+ ("ndparams", None, ht.TOr(ht.TDict, ht.TNone)),
]
_NFLAGS = ["master_capable", "vm_capable"]
offline=False, drained=False,
group=node_group)
+ if self.op.ndparams:
+ utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
if new_node.master_candidate:
self.LogInfo("Node will be a master candidate")
+ if self.op.ndparams:
+ new_node.ndparams = self.op.ndparams
+
# check connectivity
result = self.rpc.call_version([node])[node]
result.Raise("Can't get version information from node %s" % node)
("master_capable", None, ht.TMaybeBool),
("vm_capable", None, ht.TMaybeBool),
("secondary_ip", None, ht.TMaybeString),
+ ("ndparams", None, ht.TOr(ht.TDict, ht.TNone)),
_PForce,
]
REQ_BGL = False
" based ping to node daemon port",
errors.ECODE_ENVIRON)
+ if self.op.ndparams:
+ new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
+ utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
+ self.new_ndparams = new_ndparams
+
def Exec(self, feedback_fn):
"""Modifies a node.
result = []
+ if self.op.ndparams:
+ node.ndparams = self.new_ndparams
+
for attr in ["master_capable", "vm_capable"]:
val = getattr(self.op, attr)
if val is not None:
errors.ECODE_NORES)
-def _CheckNodesFreeDisk(lu, nodenames, requested):
- """Checks if nodes have enough free disk space in the default VG.
+def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
+ """Checks if nodes have enough free disk space in the all VGs.
+
+ This function check if all given nodes have the needed amount of
+ free disk. In case any node has less disk or we cannot get the
+ information from the node, this function raise an OpPrereqError
+ exception.
+
+ @type lu: C{LogicalUnit}
+ @param lu: a logical unit from which we get configuration data
+ @type nodenames: C{list}
+ @param nodenames: the list of node names to check
+ @type req_sizes: C{dict}
+  @param req_sizes: a dict mapping volume group names to the amount of
+      disk space (in MiB) to check for in that volume group
+ @raise errors.OpPrereqError: if the node doesn't have enough disk,
+ or we cannot check the node
+
+ """
+ if req_sizes is not None:
+ for vg, req_size in req_sizes.iteritems():
+ _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
+
+
+def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
+ """Checks if nodes have enough free disk space in the specified VG.
This function check if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
@param lu: a logical unit from which we get configuration data
@type nodenames: C{list}
@param nodenames: the list of node names to check
+ @type vg: C{str}
+ @param vg: the volume group to check
@type requested: C{int}
@param requested: the amount of disk in MiB to check for
- @raise errors.OpPrereqError: if the node doesn't have enough disk, or
- we cannot check the node
+ @raise errors.OpPrereqError: if the node doesn't have enough disk,
+ or we cannot check the node
"""
- nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
+ nodeinfo = lu.rpc.call_node_info(nodenames, vg,
lu.cfg.GetHypervisorType())
for node in nodenames:
info = nodeinfo[node]
prereq=True, ecode=errors.ECODE_ENVIRON)
vg_free = info.payload.get("vg_free", None)
if not isinstance(vg_free, int):
- raise errors.OpPrereqError("Can't compute free disk space on node %s,"
- " result was '%s'" % (node, vg_free),
- errors.ECODE_ENVIRON)
+ raise errors.OpPrereqError("Can't compute free disk space on node"
+ " %s for vg %s, result was '%s'" %
+ (node, vg, vg_free), errors.ECODE_ENVIRON)
if requested > vg_free:
- raise errors.OpPrereqError("Not enough disk space on target node %s:"
- " required %d MiB, available %d MiB" %
- (node, requested, vg_free),
+ raise errors.OpPrereqError("Not enough disk space on target node %s"
+ " vg %s: required %d MiB, available %d MiB" %
+ (node, vg, requested, vg_free),
errors.ECODE_NORES)
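
A minimal, self-contained sketch (not Ganeti code) of the division of work between the two helpers above: req_sizes maps each volume group to the space every node must have free in it, and the check is applied group by group. The free_space dict below stands in for the per-node data that call_node_info returns:

def check_nodes_free_disk_per_vg(free_space, nodenames, req_sizes):
  """free_space is assumed to look like {node: {vg: free_mib}}."""
  if req_sizes is None:
    return
  for vg, req_size in req_sizes.items():
    for node in nodenames:
      vg_free = free_space.get(node, {}).get(vg)
      if not isinstance(vg_free, int):
        raise RuntimeError("Can't compute free disk space on node %s for vg %s"
                           % (node, vg))
      if req_size > vg_free:
        raise RuntimeError("Not enough disk space on node %s vg %s:"
                           " required %d MiB, available %d MiB"
                           % (node, vg, req_size, vg_free))

# Passes: node1 has enough free space in both volume groups.
check_nodes_free_disk_per_vg({"node1": {"xenvg": 2048, "othervg": 512}},
                             ["node1"], {"xenvg": 1024, "othervg": 256})
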
instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, instance.primary_node)
+ _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
+ " offline, cannot reinstall")
+ for node in instance.secondary_nodes:
+ _CheckNodeOnline(self, node, "Instance secondary node offline,"
+ " cannot reinstall")
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
"""
# pylint: disable-msg=W0142
_OP_PARAMS = [
- ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
+ _POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
("use_locking", False, ht.TBool),
]
return results
-def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
+def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
p_minor, s_minor):
"""Generate a drbd8 device complete with its children.
"""
port = lu.cfg.AllocatePort()
- vgname = lu.cfg.GetVGName()
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
logical_id=(vgname, names[0]))
instance_name, primary_node,
secondary_nodes, disk_info,
file_storage_dir, file_driver,
- base_index):
+ base_index, feedback_fn):
"""Generate the entire disk layout for a given template type.
"""
for i in range(disk_count)])
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
+ vg = disk.get("vg", vgname)
+ feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
- logical_id=(vgname, names[idx]),
+ logical_id=(vg, names[idx]),
iv_name="disk/%d" % disk_index,
mode=disk["mode"])
disks.append(disk_dev)
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
+ vg = disk.get("vg", vgname)
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
- disk["size"], names[idx*2:idx*2+2],
+ disk["size"], vg, names[idx*2:idx*2+2],
"disk/%d" % disk_index,
minors[idx*2], minors[idx*2+1])
disk_dev.mode = disk["mode"]
return all_result
+def _ComputeDiskSizePerVG(disk_template, disks):
+ """Compute disk size requirements in the volume group
+
+ """
+ def _compute(disks, payload):
+ """Universal algorithm
+
+ """
+ vgs = {}
+ for disk in disks:
+ vgs[disk["vg"]] = vgs.get("vg", 0) + disk["size"] + payload
+
+ return vgs
+
+ # Required free disk space as a function of disk and swap space
+ req_size_dict = {
+ constants.DT_DISKLESS: None,
+ constants.DT_PLAIN: _compute(disks, 0),
+ # 128 MB are added for drbd metadata for each disk
+ constants.DT_DRBD8: _compute(disks, 128),
+ constants.DT_FILE: None,
+ }
+
+ if disk_template not in req_size_dict:
+ raise errors.ProgrammerError("Disk template '%s' size requirement"
+ " is unknown" % disk_template)
+
+ return req_size_dict[disk_template]
+
def _ComputeDiskSize(disk_template, disks):
"""Compute disk size requirements in the volume group
except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
- new_disk = {"size": size, "mode": mode}
+ vg = disk.get("vg", self.cfg.GetVGName())
+ new_disk = {"size": size, "mode": mode, "vg": vg}
if "adopt" in disk:
new_disk["adopt"] = disk["adopt"]
self.disks.append(new_disk)
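
Illustrative only: with the change above, each disk specification may now carry an explicit "vg" key, and when it is absent the cluster default volume group is used; "xenvg" below merely stands in for whatever self.cfg.GetVGName() returns:

default_vg = "xenvg"                      # stands in for self.cfg.GetVGName()
specs = [{"size": 1024, "mode": "rw"},
         {"size": 512, "mode": "rw", "vg": "fastvg"}]
disks = [{"size": d["size"], "mode": d["mode"], "vg": d.get("vg", default_vg)}
         for d in specs]
assert [d["vg"] for d in disks] == ["xenvg", "fastvg"]
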
nodenames = [pnode.name] + self.secondaries
- req_size = _ComputeDiskSize(self.op.disk_template,
- self.disks)
-
- # Check lv size requirements, if not adopting
- if req_size is not None and not self.adopt_disks:
- _CheckNodesFreeDisk(self, nodenames, req_size)
+ if not self.adopt_disks:
+ # Check lv size requirements, if not adopting
+ req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
+ _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
- if self.adopt_disks: # instead, we must check the adoption data
+ else: # instead, we must check the adoption data
all_lvs = set([i["adopt"] for i in self.disks])
if len(all_lvs) != len(self.disks):
raise errors.OpPrereqError("Duplicate volume names given for adoption",
errors.ECODE_INVAL)
for lv_name in all_lvs:
try:
+ # FIXME: VG must be provided here. Else all LVs with the
+ # same name will be locked on all VGs.
self.cfg.ReserveLV(lv_name, self.proc.GetECId())
except errors.ReservationError:
raise errors.OpPrereqError("LV named %s used by another instance" %
self.disks,
file_storage_dir,
self.op.file_driver,
- 0)
+ 0,
+ feedback_fn)
iobj = objects.Instance(name=instance, os=self.op.os_type,
primary_node=pnode_name,
self.disk = instance.FindDisk(self.op.disk)
if instance.disk_template != constants.DT_FILE:
- # TODO: check the free disk space for file, when that feature will be
- # supported
- _CheckNodesFreeDisk(self, nodenames, self.op.amount)
+ # TODO: check the free disk space for file, when that feature
+ # will be supported
+ _CheckNodesFreeDiskPerVG(self, nodenames,
+ {self.disk.physical_id[0]: self.op.amount})
def Exec(self, feedback_fn):
"""Execute disk grow.
self.op.remote_node, errors.ECODE_STATE)
_CheckNodeOnline(self, self.op.remote_node)
_CheckNodeNotDrained(self, self.op.remote_node)
- disks = [{"size": d.size} for d in instance.disks]
- required = _ComputeDiskSize(self.op.disk_template, disks)
- _CheckNodesFreeDisk(self, [self.op.remote_node], required)
+ disks = [{"size": d.size, "vg": d.vg} for d in instance.disks]
+ required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
+ _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
# hvparams processing
if self.op.hvparams:
disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
instance.name, pnode, [snode],
- disk_info, None, None, 0)
+ disk_info, None, None, 0, feedback_fn)
info = _GetInstanceInfoText(instance)
feedback_fn("Creating aditional volumes...")
# first, create the missing data and meta devices
[disk_dict],
file_path,
file_driver,
- disk_idx_base)[0]
+ disk_idx_base, feedback_fn)[0]
instance.disks.append(new_disk)
info = _GetInstanceInfoText(instance)