ht.TListOf(ht.TElemOf(constants.CV_ALL_ECODES_STRINGS)),
"List of error codes that should be treated as warnings")
+# Disk parameters
+# NOTE(review): accepts either None (default, meaning "no override") or a
+# dict mapping a disk template name (constants.DISK_TEMPLATES) to a dict of
+# that template's parameter defaults.
+_PDiskParams = ("diskparams", None,
+ ht.TOr(
+ ht.TDictOf(ht.TElemOf(constants.DISK_TEMPLATES), ht.TDict),
+ ht.TNone),
+ "Disk templates' parameter defaults")
+
+# Parameters for node resource model
+# Each is a "maybe dict": None (default, leave state unchanged) or a dict of
+# states to set.
+_PHvState = ("hv_state", None, ht.TMaybeDict, "Set hypervisor states")
+_PDiskState = ("disk_state", None, ht.TMaybeDict, "Set disk states")
+
+
+# Boolean flag, defaults to False; when True, instance policy (ipolicy)
+# violations do not abort the operation
+_PIgnoreIpolicy = ("ignore_ipolicy", False, ht.TBool,
+ "Whether to ignore ipolicy violations")
+
+# Allow runtime changes while migrating
+_PAllowRuntimeChgs = ("allow_runtime_changes", True, ht.TBool,
+ "Allow runtime changes (eg. memory ballooning)")
+
+
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
_TSetParamsResultItemItems = [
ht.Comment("name of changed parameter")(ht.TNonEmptyString),
- ht.TAny,
+ ht.Comment("new value")(ht.TAny),
]
_TSetParamsResult = \
ht.TListOf(ht.TAnd(ht.TIsLength(len(_TSetParamsResultItemItems)),
ht.TItems(_TSetParamsResultItemItems)))
+# TODO: Generate check from constants.IDISK_PARAMS_TYPES (however, not all users
+# of this check support all parameters)
+# Checks a dict whose keys are valid per-disk parameter names
+# (constants.IDISK_PARAMS) and whose values are non-empty strings or integers
+_TDiskParams = ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
+ ht.TOr(ht.TNonEmptyString, ht.TInt))
+
_SUMMARY_PREFIX = {
"CLUSTER_": "C_",
"GROUP_": "G_",
ht.TElemOf(constants.OP_PRIO_SUBMIT_VALID), "Opcode priority"),
(DEPEND_ATTR, None, _BuildJobDepCheck(True),
"Job dependencies; if used through ``SubmitManyJobs`` relative (negative)"
- " job IDs can be used"),
+ " job IDs can be used; see :doc:`design document <design-chained-jobs>`"
+ " for details"),
(COMMENT_ATTR, None, ht.TMaybeString,
"Comment describing the purpose of the opcode"),
]
ht.TAnd(ht.TIsLength(3),
ht.TItems([ht.TDictOf(ht.TString, ht.TString),
ht.TListOf(ht.TString),
- ht.TDictOf(ht.TString, ht.TListOf(ht.TString))]))
+ ht.TDictOf(ht.TString,
+ ht.TListOf(ht.TListOf(ht.TString)))]))
class OpClusterRepairDiskSizes(OpCode):
"""
OP_PARAMS = [
+ _PHvState,
+ _PDiskState,
("vg_name", None, ht.TMaybeString, "Volume group name"),
("enabled_hypervisors", None,
ht.TOr(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue),
("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide OS parameter defaults"),
+ _PDiskParams,
("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone),
"Master candidate pool size"),
("uid_pool", None, ht.NoType,
"Whether to wipe disks before allocating them to instances"),
("nicparams", None, ht.TMaybeDict, "Cluster-wide NIC parameter defaults"),
("ndparams", None, ht.TMaybeDict, "Cluster-wide node parameter defaults"),
+ ("ipolicy", None, ht.TMaybeDict,
+ "Cluster-wide :ref:`instance policy <rapi-ipolicy>` specs"),
("drbd_helper", None, ht.TOr(ht.TString, ht.TNone), "DRBD helper program"),
("default_iallocator", None, ht.TOr(ht.TString, ht.TNone),
"Default iallocator for cluster"),
"Modify list of blacklisted operating systems. Each modification must have"
" two items, the operation and the OS name. The operation can be"
" ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
+ ("use_external_mip_script", None, ht.TMaybeBool,
+ "Whether to use an external master IP address setup script"),
]
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
+ _PHvState,
+ _PDiskState,
("primary_ip", None, ht.NoType, "Primary IP address"),
("secondary_ip", None, ht.TMaybeString, "Secondary IP address"),
("readd", False, ht.TBool, "Whether node is re-added to cluster"),
OP_PARAMS = [
_PNodeName,
_PForce,
+ _PHvState,
+ _PDiskState,
("master_candidate", None, ht.TMaybeBool,
"Whether the node should become a master candidate"),
("offline", None, ht.TMaybeBool,
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
+ _PAllowRuntimeChgs,
+ _PIgnoreIpolicy,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]
+ OP_RESULT = TJobIdListOnly
class OpNodeEvacuate(OpCode):
_PNodeName,
("remote_node", None, ht.TMaybeString, "New secondary node"),
("iallocator", None, ht.TMaybeString, "Iallocator for computing solution"),
- ("mode", ht.NoDefault, ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES),
+ ("mode", ht.NoDefault, ht.TElemOf(constants.NODE_EVAC_MODES),
"Node evacuation mode"),
]
OP_RESULT = TJobIdListOnly
_PForceVariant,
_PWaitForSync,
_PNameCheck,
+ _PIgnoreIpolicy,
("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"),
- ("disks", ht.NoDefault,
- # TODO: Generate check from constants.IDISK_PARAMS_TYPES
- ht.TListOf(ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
- ht.TOr(ht.TNonEmptyString, ht.TInt))),
+ ("disks", ht.NoDefault, ht.TListOf(_TDiskParams),
"Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;"
" each disk definition must contain a ``%s`` value and"
" can contain an optional ``%s`` value denoting the disk access mode"
OP_PARAMS = [
_PInstanceName,
_PEarlyRelease,
+ _PIgnoreIpolicy,
("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES),
"Replacement mode"),
("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
_PShutdownTimeout,
_PIgnoreConsistency,
_PMigrationTargetNode,
+ _PIgnoreIpolicy,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
+ _PAllowRuntimeChgs,
+ _PIgnoreIpolicy,
("cleanup", False, ht.TBool,
"Whether a previously failed migration should be cleaned up"),
("iallocator", None, ht.TMaybeString,
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
+ _PIgnoreIpolicy,
("target_node", ht.NoDefault, ht.TNonEmptyString, "Target node"),
_PIgnoreConsistency,
]
class OpInstanceRecreateDisks(OpCode):
"""Recreate an instance's disks."""
+ # Checks one (disk index, parameters) pair: a two-item tuple of a
+ # non-negative disk index and a (possibly empty) disk-parameter dict
+ _TDiskChanges = \
+ ht.TAnd(ht.TIsLength(2),
+ ht.TItems([ht.Comment("Disk index")(ht.TPositiveInt),
+ ht.Comment("Parameters")(_TDiskParams)]))
+
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
- ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
- "List of disk indexes"),
+ ("disks", ht.EmptyList,
+ ht.TOr(ht.TListOf(ht.TPositiveInt), ht.TListOf(_TDiskChanges)),
+ "List of disk indexes (deprecated) or a list of tuples containing a disk"
+ " index and a possibly empty dictionary with disk parameter changes"),
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"New instance nodes, if relocation is desired"),
]
_PInstanceName,
_PForce,
_PForceVariant,
+ _PIgnoreIpolicy,
# TODO: Use _TestNicDef
("nics", ht.EmptyList, ht.TList,
"List of NIC changes. Each item is of the form ``(op, settings)``."
(constants.DDM_ADD, constants.DDM_REMOVE)),
("disks", ht.EmptyList, ht.TList, "List of disk changes. See ``nics``."),
("beparams", ht.EmptyDict, ht.TDict, "Per-instance backend parameters"),
+ ("runtime_mem", None, ht.TMaybeStrictPositiveInt, "New runtime memory"),
("hvparams", ht.EmptyDict, ht.TDict,
"Per-instance hypervisor parameters, hypervisor-dependent"),
("disk_template", None, ht.TOr(ht.TNone, _BuildDiskTemplateCheck(False)),
("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"),
("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize, when changing template"),
+ ("offline_inst", False, ht.TBool,
+ "Whether to turn off the down instance completely"),
+ ("online_inst", False, ht.TBool,
+ "Whether to enable the offline instance"),
]
OP_RESULT = _TSetParamsResult
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
+ _PDiskParams,
+ _PHvState,
+ _PDiskState,
+ ("ipolicy", None, ht.TMaybeDict,
+ "Group-wide :ref:`instance policy <rapi-ipolicy>` specs"),
]
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
+ _PDiskParams,
+ _PHvState,
+ _PDiskState,
+ ("ipolicy", None, ht.TMaybeDict, "Group-wide instance policy specs"),
]
OP_RESULT = _TSetParamsResult