master_netmask, master_netdev, file_storage_dir,
shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
vg_name=None, beparams=None, nicparams=None, ndparams=None,
- hvparams=None, enabled_hypervisors=None, modify_etc_hosts=True,
- modify_ssh_setup=True, maintain_node_health=False,
- drbd_helper=None, uid_pool=None, default_iallocator=None,
- primary_ip_version=None, prealloc_wipe_disks=False,
- use_external_mip_script=False):
+ hvparams=None, diskparams=None, enabled_hypervisors=None,
+ modify_etc_hosts=True, modify_ssh_setup=True,
+ maintain_node_health=False, drbd_helper=None, uid_pool=None,
+ default_iallocator=None, primary_ip_version=None,
+ prealloc_wipe_disks=False, use_external_mip_script=False):
"""Initialise the cluster.
@type candidate_pool_size: int
hv_class = hypervisor.GetHypervisor(hv_name)
hv_class.CheckParameterSyntax(hv_params)
+  # diskparams is a mapping of disk-template name -> dict of parameters for
+  # that template.  The parameter has a default of None (see the signature),
+  # so guard the iteration instead of crashing with AttributeError.
+  for template, dt_params in (diskparams or {}).items():
+    param_keys = set(dt_params.keys())
+    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
+    if not (param_keys <= default_param_keys):
+      # Reject keys that have no default for this template
+      unknown_params = param_keys - default_param_keys
+      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
+                                 " %s" % (template,
+                                          utils.CommaJoin(unknown_params)),
+                                 errors.ECODE_INVAL)
+    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
# set up ssh config and /etc/hosts
sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
sshkey = sshline.split(" ")[1]
nicparams={constants.PP_DEFAULT: nicparams},
ndparams=ndparams,
hvparams=hvparams,
+ diskparams=diskparams,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
modify_ssh_setup=modify_ssh_setup,
uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
name=constants.INITIAL_NODE_GROUP_NAME,
members=[master_node_config.name],
+ diskparams=cluster_config.diskparams,
)
nodegroups = {
default_nodegroup.uuid: default_nodegroup,
"DEBUG_SIMERR_OPT",
"DISKIDX_OPT",
"DISK_OPT",
+ "DISK_PARAMS_OPT",
"DISK_TEMPLATE_OPT",
"DRAINED_OPT",
"DRY_RUN_OPT",
default={}, dest="hvparams",
help="Hypervisor parameters")
+DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
+ help="Disk template parameters, in the format"
+ " template:option=value,option=value,...",
+ type="identkeyval", action="append", default=[])
+
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
help="Hypervisor and hypervisor options, in the"
" format hypervisor:option=value,option=value,...",
beparams = opts.beparams
nicparams = opts.nicparams
+ diskparams = dict(opts.diskparams)
+
+ # check the disk template types here, as we cannot rely on the type check done
+ # by the opcode parameter types
+ diskparams_keys = set(diskparams.keys())
+ if not (diskparams_keys <= constants.DISK_TEMPLATES):
+ unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
+ ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
+ return 1
+
# prepare beparams dict
beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
+ # prepare diskparams dict
+ for templ in constants.DISK_TEMPLATES:
+ if templ not in diskparams:
+ diskparams[templ] = {}
+ diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
+ diskparams[templ])
+ utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
+
if opts.candidate_pool_size is None:
opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
beparams=beparams,
nicparams=nicparams,
ndparams=ndparams,
+ diskparams=diskparams,
candidate_pool_size=opts.candidate_pool_size,
modify_etc_hosts=opts.modify_etc_hosts,
modify_ssh_setup=opts.modify_ssh_setup,
if not (not opts.lvm_storage or opts.vg_name or
not opts.drbd_storage or opts.drbd_helper or
opts.enabled_hypervisors or opts.hvparams or
- opts.beparams or opts.nicparams or opts.ndparams or
+ opts.beparams or opts.nicparams or
+ opts.ndparams or opts.diskparams or
opts.candidate_pool_size is not None or
opts.uid_pool is not None or
opts.maintain_node_health is not None or
for hv_params in hvparams.values():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+  diskparams = dict(opts.diskparams)
+
+  # Type-check the disk-template parameter dicts.  Note: this must iterate
+  # diskparams, not hvparams (hypervisor parameters are checked above with
+  # HVS_PARAMETER_TYPES); applying DISK_DT_TYPES to hvparams was a bug.
+  for dt_params in diskparams.values():
+    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
beparams = opts.beparams
utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
beparams=beparams,
nicparams=nicparams,
ndparams=ndparams,
+ diskparams=diskparams,
candidate_pool_size=opts.candidate_pool_size,
maintain_node_health=mnh,
uid_pool=uid_pool,
NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
- NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT],
+ NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
+ DISK_PARAMS_OPT],
"[opts...] <cluster_name>", "Initialises a new cluster configuration"),
"destroy": (
DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
- NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT],
+ NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT],
"[opts...]",
"Alters the parameters of the cluster"),
"renew-crypto": (
"""
(group_name,) = args
+ diskparams = dict(opts.diskparams)
op = opcodes.OpGroupAdd(group_name=group_name, ndparams=opts.ndparams,
- alloc_policy=opts.alloc_policy)
+ alloc_policy=opts.alloc_policy,
+ diskparams=diskparams)
SubmitOpCode(op, opts=opts)
@return: the desired exit code
"""
- if opts.ndparams is None and opts.alloc_policy is None:
+ if (opts.ndparams is None and opts.alloc_policy is None
+ and not opts.diskparams):
ToStderr("Please give at least one of the parameters.")
return 1
+ diskparams = dict(opts.diskparams)
op = opcodes.OpGroupSetParams(group_name=args[0],
ndparams=opts.ndparams,
- alloc_policy=opts.alloc_policy)
+ alloc_policy=opts.alloc_policy,
+ diskparams=diskparams)
result = SubmitOrSend(op, opts)
if result:
commands = {
"add": (
- AddGroup, ARGS_ONE_GROUP, [DRY_RUN_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT],
+ AddGroup, ARGS_ONE_GROUP,
+ [DRY_RUN_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT, DISK_PARAMS_OPT],
"<group_name>", "Add a new node group to the cluster"),
"assign-nodes": (
AssignNodes, ARGS_ONE_GROUP + ARGS_MANY_NODES, [DRY_RUN_OPT, FORCE_OPT],
"Lists all available fields for node groups"),
"modify": (
SetGroupParams, ARGS_ONE_GROUP,
- [DRY_RUN_OPT, SUBMIT_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT],
+ [DRY_RUN_OPT, SUBMIT_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT,
+ DISK_PARAMS_OPT],
"<group_name>", "Alters the parameters of a node group"),
"remove": (
RemoveGroup, ARGS_ONE_GROUP, [DRY_RUN_OPT],
if self.op.master_netmask is not None:
_ValidateNetmask(self.cfg, self.op.master_netmask)
+ if self.op.diskparams:
+ for dt_params in self.op.diskparams.values():
+ utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
def ExpandNames(self):
# FIXME: in the future maybe other cluster params won't require checking on
# all nodes to be modified.
else:
self.new_hvparams[hv_name].update(hv_dict)
+    # disk template parameters: start from a copy of the cluster's current
+    # values and merge the requested changes on top
+    self.new_diskparams = objects.FillDict(cluster.diskparams, {})
+    if self.op.diskparams:
+      for dt_name, dt_params in self.op.diskparams.items():
+        # Membership must be tested against the merged dict; testing against
+        # self.op.diskparams (the dict being iterated) is always true for
+        # dt_name, so the assignment branch was unreachable.
+        if dt_name not in self.new_diskparams:
+          self.new_diskparams[dt_name] = dt_params
+        else:
+          self.new_diskparams[dt_name].update(dt_params)
+
# os hypervisor parameters
self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
if self.op.os_hvp:
self.cluster.osparams = self.new_osp
if self.op.ndparams:
self.cluster.ndparams = self.new_ndparams
+ if self.op.diskparams:
+ self.cluster.diskparams = self.new_diskparams
if self.op.candidate_pool_size is not None:
self.cluster.candidate_pool_size = self.op.candidate_pool_size
if self.op.ndparams:
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+ if self.op.diskparams:
+ for templ in constants.DISK_TEMPLATES:
+ if templ not in self.op.diskparams:
+ self.op.diskparams[templ] = {}
+ utils.ForceDictType(self.op.diskparams[templ], constants.DISK_DT_TYPES)
+ else:
+ self.op.diskparams = self.cfg.GetClusterInfo().diskparams
+
def BuildHooksEnv(self):
"""Build hooks env.
group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
uuid=self.group_uuid,
alloc_policy=self.op.alloc_policy,
- ndparams=self.op.ndparams)
+ ndparams=self.op.ndparams,
+ diskparams=self.op.diskparams)
self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
del self.remove_locks[locking.LEVEL_NODEGROUP]
def CheckArguments(self):
all_changes = [
self.op.ndparams,
+ self.op.diskparams,
self.op.alloc_policy,
]
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
self.new_ndparams = new_ndparams
+ if self.op.diskparams:
+ self.new_diskparams = dict()
+ for templ in constants.DISK_TEMPLATES:
+ if templ not in self.op.diskparams:
+ self.op.diskparams[templ] = {}
+ new_templ_params = _GetUpdatedParams(self.group.diskparams[templ],
+ self.op.diskparams[templ])
+ utils.ForceDictType(new_templ_params, constants.DISK_DT_TYPES)
+ self.new_diskparams[templ] = new_templ_params
+
def BuildHooksEnv(self):
"""Build hooks env.
self.group.ndparams = self.new_ndparams
result.append(("ndparams", str(self.group.ndparams)))
+ if self.op.diskparams:
+ self.group.diskparams = self.new_diskparams
+ result.append(("diskparams", str(self.group.diskparams)))
+
if self.op.alloc_policy:
self.group.alloc_policy = self.op.alloc_policy
LD_DRBD8 = "drbd8"
LD_FILE = "file"
LD_BLOCKDEV = "blockdev"
+LOGICAL_DISK_TYPES = frozenset([
+ LD_LV,
+ LD_DRBD8,
+ LD_FILE,
+ LD_BLOCKDEV,
+ ])
+
LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV])
# drbd constants
NDS_PARAMETERS = frozenset(NDS_PARAMETER_TYPES.keys())
+# Logical Disks parameters
+DISK_LD_TYPES = {
+ }
+DISK_LD_PARAMETERS = frozenset(DISK_LD_TYPES.keys())
+
+# Disk template parameters
+DISK_DT_TYPES = {
+ }
+
+DISK_DT_PARAMETERS = frozenset(DISK_DT_TYPES.keys())
+
# OOB supported commands
OOB_POWER_ON = "power-on"
OOB_POWER_OFF = "power-off"
ND_OOB_PROGRAM: None,
}
+DISK_LD_DEFAULTS = {
+ LD_DRBD8: {
+ },
+ LD_LV: {
+ },
+ LD_FILE: {
+ },
+ LD_BLOCKDEV: {
+ },
+ }
+
+DISK_DT_DEFAULTS = {
+ DT_PLAIN: {
+ },
+ DT_DRBD8: {
+ },
+ DT_DISKLESS: {
+ },
+ DT_FILE: {
+ },
+ DT_SHARED_FILE: {
+ },
+ DT_BLOCK: {
+ },
+ }
+
NICC_DEFAULTS = {
NIC_MODE: NIC_MODE_BRIDGED,
NIC_LINK: DEFAULT_BRIDGE,
" result '%s'", idx, src_node, result.payload)
else:
disk_id = tuple(result.payload)
+ disk_params = constants.DISK_LD_DEFAULTS[constants.LD_LV].copy()
new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
logical_id=disk_id, physical_id=disk_id,
- iv_name=disk.iv_name)
+ iv_name=disk.iv_name,
+ params=disk_params)
self._snap_disks.append(new_dev)
del target[constants.BE_MEMORY]
+def UpgradeDiskParams(diskparams):
+  """Upgrade the disk parameters.
+
+  Fills in defaults from L{constants.DISK_DT_DEFAULTS}, so the result always
+  has one (fully populated) entry per known disk template.
+
+  @type diskparams: dict
+  @param diskparams: disk parameters to upgrade, or None
+  @rtype: dict
+  @return: the upgraded disk parameters dict
+
+  """
+  result = {}
+  if diskparams is None:
+    diskparams = {}
+  # Iterate over constants.DISK_TEMPLATES (not over the input) because new
+  # templates might have been added since the configuration was written.
+  for template in constants.DISK_TEMPLATES:
+    if template not in diskparams:
+      # copy() per template: a plain DISK_DT_DEFAULTS.copy() would be a
+      # shallow copy sharing the inner dicts with the constants module, so
+      # later in-place updates would corrupt the defaults
+      result[template] = constants.DISK_DT_DEFAULTS[template].copy()
+    else:
+      result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
+                                  diskparams[template])
+
+  return result
+
+
+
+
class ConfigObject(object):
"""A generic config object.
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size", "mode"]
+ "children", "iv_name", "size", "mode", "params"]
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
if self.children:
for child in self.children:
child.UpgradeConfig()
+
+ if not self.params:
+ self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
+ else:
+ self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
+ self.params)
# add here config upgrade for this disk
"name",
"members",
"ndparams",
+ "diskparams",
"serial_no",
"alloc_policy",
] + _TIMESTAMPS + _UUID
if self.mtime is None:
self.mtime = time.time()
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+
def FillND(self, node):
"""Return filled out ndparams for L{objects.Node}
"osparams",
"nicparams",
"ndparams",
+ "diskparams",
"candidate_pool_size",
"modify_etc_hosts",
"modify_ssh_setup",
if self.use_external_mip_script is None:
self.use_external_mip_script = False
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+
def ToDict(self):
"""Custom function for cluster.
ht.TListOf(ht.TElemOf(constants.CV_ALL_ECODES_STRINGS)),
"List of error codes that should be treated as warnings")
+# Disk parameters
+_PDiskParams = ("diskparams", None,
+ ht.TOr(
+ ht.TDictOf(ht.TElemOf(constants.DISK_TEMPLATES), ht.TDict),
+ ht.TNone),
+ "Disk templates' parameter defaults")
+
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide OS parameter defaults"),
+ _PDiskParams,
("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone),
"Master candidate pool size"),
("uid_pool", None, ht.NoType,
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
+ _PDiskParams,
]
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
+ _PDiskParams,
]
OP_RESULT = _TSetParamsResult
self.failUnless(constants.OP_PRIO_NORMAL > constants.OP_PRIO_HIGH)
self.failUnless(constants.OP_PRIO_HIGH > constants.OP_PRIO_HIGHEST)
+  def testDiskDefaults(self):
+    # Every logical-disk type and every disk template must have a (possibly
+    # empty) entry in its defaults dict, so lookups like
+    # DISK_LD_DEFAULTS[dev_type] can never raise KeyError.
+    self.failUnless(set(constants.DISK_LD_DEFAULTS.keys()) ==
+                    constants.LOGICAL_DISK_TYPES)
+    self.failUnless(set(constants.DISK_DT_DEFAULTS.keys()) ==
+                    constants.DISK_TEMPLATES)
+
class TestExportedNames(unittest.TestCase):
_VALID_NAME_RE = re.compile(r"^[A-Z][A-Z0-9_]+$")