import logging
import copy
import OpenSSL
+import socket
+import tempfile
+import shutil
from ganeti import ssh
from ganeti import utils
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
+from ganeti import netutils
import ganeti.masterd.instance # pylint: disable-msg=W0611
return {}
+#: The without-default default value
+_NoDefault = object()
+
+
+#: The no-type (value too complex to check in the type system)
+_NoType = object()
+
+
# Some basic types
def _TNotNone(val):
"""Checks if the given value is not None.
# Type aliases
-# non-empty string
-_TNEString = _TAnd(_TString, _TTrue)
+#: a non-empty string
+_TNonEmptyString = _TAnd(_TString, _TTrue)
+
+#: a maybe non-empty string
+_TMaybeString = _TOr(_TNonEmptyString, _TNone)
-# positive integer
-_TPInt = _TAnd(_TInt, lambda v: v >= 0)
+
+#: a maybe boolean (bool or none)
+_TMaybeBool = _TOr(_TBool, _TNone)
+
+
+#: a positive integer
+_TPositiveInt = _TAnd(_TInt, lambda v: v >= 0)
+
+#: a strictly positive integer
+_TStrictPositiveInt = _TAnd(_TInt, lambda v: v > 0)
def _TListOf(my_type):
"""
return _TAnd(_TList,
- lambda lst: compat.all(lst, my_type))
+ lambda lst: compat.all(my_type(v) for v in lst))
def _TDictOf(key_type, val_type):
"""
return _TAnd(_TDict,
- lambda my_dict: (compat.all(my_dict.keys(), key_type) and
- compat.all(my_dict.values(), val_type)))
+ lambda my_dict: (compat.all(key_type(v) for v in my_dict.keys())
+ and compat.all(val_type(v)
+ for v in my_dict.values())))
+
+
+# Common opcode attributes
+
+#: output fields for a query operation
+_POutputFields = ("output_fields", _NoDefault, _TListOf(_TNonEmptyString))
+
+
+#: the shutdown timeout
+_PShutdownTimeout = ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
+ _TPositiveInt)
+
+#: the force parameter
+_PForce = ("force", False, _TBool)
+
+#: a required instance name (for single-instance LUs)
+_PInstanceName = ("instance_name", _NoDefault, _TNonEmptyString)
+
+
+#: a required node name (for single-node LUs)
+_PNodeName = ("node_name", _NoDefault, _TNonEmptyString)
+
+#: the migration type (live/non-live)
+_PMigrationLive = ("live", None, _TOr(_TNone,
+ _TElemOf(constants.HT_MIGRATION_TYPES)))
# End types
@ivar dry_run_result: the value (if any) that will be returned to the caller
in dry-run mode (signalled by opcode dry_run parameter)
- @cvar _OP_DEFS: a list of opcode attributes and the defaults values
- they should get if not already existing
+ @cvar _OP_PARAMS: a list of opcode attributes, their defaults values
+ they should get if not already defined, and types they must match
"""
HPATH = None
HTYPE = None
- _OP_REQP = []
- _OP_DEFS = []
+ _OP_PARAMS = []
REQ_BGL = True
def __init__(self, processor, op, context, rpc):
self.recalculate_locks = {}
self.__ssh = None
# logging
+ self.Log = processor.Log # pylint: disable-msg=C0103
self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
self.LogStep = processor.LogStep # pylint: disable-msg=C0103
# Tasklets
self.tasklets = None
- for aname, aval in self._OP_DEFS:
- if not hasattr(self.op, aname):
- if callable(aval):
- dval = aval()
- else:
- dval = aval
- setattr(self.op, aname, dval)
-
- for attr_name, test in self._OP_REQP:
+ # The new kind-of-type-system
+ op_id = self.op.OP_ID
+ for attr_name, aval, test in self._OP_PARAMS:
if not hasattr(op, attr_name):
- raise errors.OpPrereqError("Required parameter '%s' missing" %
- attr_name, errors.ECODE_INVAL)
- attr_val = getattr(op, attr_name, None)
+ if aval == _NoDefault:
+ raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
+ (op_id, attr_name), errors.ECODE_INVAL)
+ else:
+ if callable(aval):
+ dval = aval()
+ else:
+ dval = aval
+ setattr(self.op, attr_name, dval)
+ attr_val = getattr(op, attr_name)
+ if test == _NoType:
+ # no tests here
+ continue
if not callable(test):
- raise errors.ProgrammerError("Validation for parameter '%s' failed,"
+ raise errors.ProgrammerError("Validation for parameter '%s.%s' failed,"
" given type is not a proper type (%s)" %
- (attr_name, test))
+ (op_id, attr_name, test))
if not test(attr_val):
- raise errors.OpPrereqError("Parameter '%s' has invalid type" %
- attr_name, errors.ECODE_INVAL)
+ logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
+ self.op.OP_ID, attr_name, type(attr_val), attr_val)
+ raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
+ (op_id, attr_name), errors.ECODE_INVAL)
self.CheckArguments()
# Acquire all nodes and one instance
self.needed_locks = {
locking.LEVEL_NODE: locking.ALL_SET,
- locking.LEVEL_INSTANCE: ['instance1.example.tld'],
+ locking.LEVEL_INSTANCE: ['instance1.example.com'],
}
# Acquire just two nodes
self.needed_locks = {
- locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
+ locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
}
# Acquire no locks
self.needed_locks = {} # No, you can't leave it to the default value None
% ",".join(delta), errors.ECODE_INVAL)
-def _CheckBooleanOpField(op, name):
- """Validates boolean opcode parameters.
-
- This will ensure that an opcode parameter is either a boolean value,
- or None (but that it always exists).
-
- """
- val = getattr(op, name, None)
- if not (val is None or isinstance(val, bool)):
- raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
- (name, str(val)), errors.ECODE_INVAL)
- setattr(op, name, val)
-
-
def _CheckGlobalHvParams(params):
"""Validates that given hypervisor params are not global ones.
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
if template == constants.DT_FILE:
_RequireFileStorage()
+ return True
def _CheckStorageType(storage_type):
return faulty
+def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
+  """Check the sanity of iallocator and node arguments and use the
+  cluster-wide iallocator if appropriate.
+
+  Check that at most one of (iallocator, node) is specified. If none is
+  specified, then the LU's opcode's iallocator slot is filled with the
+  cluster-wide default iallocator.
+
+  @type iallocator_slot: string
+  @param iallocator_slot: the name of the opcode iallocator slot
+  @type node_slot: string
+  @param node_slot: the name of the opcode target node slot
+
+  """
+  node = getattr(lu.op, node_slot, None)
+  iallocator = getattr(lu.op, iallocator_slot, None)
+
+  if node is not None and iallocator is not None:
+    raise errors.OpPrereqError("Do not specify both, iallocator and node.",
+                               errors.ECODE_INVAL)
+  elif node is None and iallocator is None:
+    default_iallocator = lu.cfg.GetDefaultIAllocator()
+    if default_iallocator:
+      setattr(lu.op, iallocator_slot, default_iallocator)
+    else:
+      # pass an error code like the sibling raise above does, so callers
+      # (and RAPI clients) get a classified failure instead of a bare message
+      raise errors.OpPrereqError("No iallocator or node given and no"
+                                 " cluster-wide default iallocator found."
+                                 " Please specify either an iallocator or a"
+                                 " node, or set a cluster-wide default"
+                                 " iallocator.", errors.ECODE_INVAL)
+
+
class LUPostInitCluster(LogicalUnit):
"""Logical unit for running hooks after cluster initialization.
"""
HPATH = "cluster-init"
HTYPE = constants.HTYPE_CLUSTER
- _OP_REQP = []
def BuildHooksEnv(self):
"""Build hooks env.
"""
HPATH = "cluster-destroy"
HTYPE = constants.HTYPE_CLUSTER
- _OP_REQP = []
def BuildHooksEnv(self):
"""Build hooks env.
"""
HPATH = "cluster-verify"
HTYPE = constants.HTYPE_CLUSTER
- _OP_REQP = [
- ("skip_checks", _TListOf(_TElemOf(constants.VERIFY_OPTIONAL_CHECKS))),
- ("verbose", _TBool),
- ("error_codes", _TBool),
- ("debug_simulate_errors", _TBool),
+ _OP_PARAMS = [
+ ("skip_checks", _EmptyList,
+ _TListOf(_TElemOf(constants.VERIFY_OPTIONAL_CHECKS))),
+ ("verbose", False, _TBool),
+ ("error_codes", False, _TBool),
+ ("debug_simulate_errors", False, _TBool),
]
REQ_BGL = False
EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
ENODEDRBD = (TNODE, "ENODEDRBD")
+ ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
ENODEHOOKS = (TNODE, "ENODEHOOKS")
ENODEHV = (TNODE, "ENODEHV")
self.bad = self.bad or cond
def _VerifyNode(self, ninfo, nresult):
- """Run multiple tests against a node.
-
- Test list:
+ """Perform some basic validation on data returned from a node.
- - compares ganeti version
- - checks vg existence and size > 20G
- - checks config file checksum
- - checks ssh to other nodes
+ - check the result data structure is well formed and has all the mandatory
+ fields
+ - check ganeti version
@type ninfo: L{objects.Node}
@param ninfo: the node to check
"file '%s' should not exist"
" on non master candidates", file_name)
- def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
+ def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
+ drbd_map):
"""Verifies and the node DRBD status.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param instanceinfo: the dict of instances
+ @param drbd_helper: the configured DRBD usermode helper
@param drbd_map: the DRBD map as returned by
L{ganeti.config.ConfigWriter.ComputeDRBDMap}
node = ninfo.name
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+ if drbd_helper:
+ helper_result = nresult.get(constants.NV_DRBDHELPER, None)
+      test = (helper_result is None)
+ _ErrorIf(test, self.ENODEDRBDHELPER, node,
+ "no drbd usermode helper returned")
+ if helper_result:
+ status, payload = helper_result
+ test = not status
+ _ErrorIf(test, self.ENODEDRBDHELPER, node,
+ "drbd usermode helper check unsuccessful: %s", payload)
+ test = status and (payload != drbd_helper)
+ _ErrorIf(test, self.ENODEDRBDHELPER, node,
+ "wrong drbd usermode helper: %s", payload)
+
# compute the DRBD minors
node_drbd = {}
for minor, instance in drbd_map[node].items():
remote_os = nresult.get(constants.NV_OSLIST, None)
test = (not isinstance(remote_os, list) or
- not compat.all(remote_os,
- lambda v: isinstance(v, list) and len(v) == 7))
+ not compat.all(isinstance(v, list) and len(v) == 7
+ for v in remote_os))
_ErrorIf(test, self.ENODEOS, node,
"node hasn't returned valid OS data")
"OS '%s' has multiple entries (first one shadows the rest): %s",
os_name, utils.CommaJoin([v[0] for v in os_data]))
-    # this will catched in backend too
+    # this will be caught in backend too
- _ErrorIf(compat.any(f_api, lambda v: v >= constants.OS_API_V15)
+ _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
and not f_var, self.ENODEOS, node,
"OS %s with API at least %d does not declare any variant",
os_name, constants.OS_API_V15)
_ErrorIf(a != b, self.ENODEOS, node,
"OS %s %s differs from reference node %s: %s vs. %s",
kind, os_name, base.name,
- utils.CommaJoin(a), utils.CommaJoin(a))
+ utils.CommaJoin(a), utils.CommaJoin(b))
# check any missing OSes
missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
_ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
vg_name = self.cfg.GetVGName()
+ drbd_helper = self.cfg.GetDRBDHelper()
hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
cluster = self.cfg.GetClusterInfo()
nodelist = utils.NiceSort(self.cfg.GetNodeList())
node_verify_param[constants.NV_PVLIST] = [vg_name]
node_verify_param[constants.NV_DRBDLIST] = None
+ if drbd_helper:
+ node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
+
# Build our expected cluster state
node_image = dict((node.name, self.NodeImage(offline=node.offline,
name=node.name))
self._VerifyNodeLVM(node_i, nresult, vg_name)
self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
master_files)
- self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
+ self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
+ all_drbd_map)
self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
"""Verifies the cluster disks status.
"""
- _OP_REQP = []
REQ_BGL = False
def ExpandNames(self):
"""Verifies the cluster disks sizes.
"""
- _OP_REQP = [("instances", _TListOf(_TNEString))]
+ _OP_PARAMS = [("instances", _EmptyList, _TListOf(_TNonEmptyString))]
REQ_BGL = False
def ExpandNames(self):
"""
HPATH = "cluster-rename"
HTYPE = constants.HTYPE_CLUSTER
- _OP_REQP = [("name", _TNEString)]
+ _OP_PARAMS = [("name", _NoDefault, _TNonEmptyString)]
def BuildHooksEnv(self):
"""Build hooks env.
"""Verify that the passed name is a valid one.
"""
- hostname = utils.GetHostInfo(self.op.name)
+ hostname = netutils.GetHostInfo(self.op.name)
new_name = hostname.name
self.ip = new_ip = hostname.ip
" cluster has changed",
errors.ECODE_INVAL)
if new_ip != old_ip:
- if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
+ if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("The given cluster IP address (%s) is"
" reachable on the network. Aborting." %
new_ip, errors.ECODE_NOTUNIQUE)
" the master, please restart manually: %s", msg)
-def _RecursiveCheckIfLVMBased(disk):
- """Check if the given disk or its children are lvm-based.
-
- @type disk: L{objects.Disk}
- @param disk: the disk to check
- @rtype: boolean
- @return: boolean indicating whether a LD_LV dev_type was found or not
-
- """
- if disk.children:
- for chdisk in disk.children:
- if _RecursiveCheckIfLVMBased(chdisk):
- return True
- return disk.dev_type == constants.LD_LV
-
-
class LUSetClusterParams(LogicalUnit):
"""Change the parameters of the cluster.
"""
HPATH = "cluster-modify"
HTYPE = constants.HTYPE_CLUSTER
- _OP_REQP = [
- ("hvparams", _TOr(_TDictOf(_TNEString, _TDict), _TNone)),
- ("os_hvp", _TOr(_TDictOf(_TNEString, _TDict), _TNone)),
- ("osparams", _TOr(_TDictOf(_TNEString, _TDict), _TNone)),
- ("enabled_hypervisors",
+ _OP_PARAMS = [
+ ("vg_name", None, _TMaybeString),
+ ("enabled_hypervisors", None,
_TOr(_TAnd(_TListOf(_TElemOf(constants.HYPER_TYPES)), _TTrue), _TNone)),
- ]
- _OP_DEFS = [
- ("candidate_pool_size", None),
- ("uid_pool", None),
- ("add_uids", None),
- ("remove_uids", None),
- ("hvparams", None),
- ("ov_hvp", None),
+ ("hvparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
+ ("beparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
+ ("os_hvp", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
+ ("osparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
+ ("candidate_pool_size", None, _TOr(_TStrictPositiveInt, _TNone)),
+ ("uid_pool", None, _NoType),
+ ("add_uids", None, _NoType),
+ ("remove_uids", None, _NoType),
+ ("maintain_node_health", None, _TMaybeBool),
+ ("nicparams", None, _TOr(_TDict, _TNone)),
+ ("drbd_helper", None, _TOr(_TString, _TNone)),
+ ("default_iallocator", None, _TMaybeString),
]
REQ_BGL = False
"""Check parameters
"""
- if self.op.candidate_pool_size is not None:
- try:
- self.op.candidate_pool_size = int(self.op.candidate_pool_size)
- except (ValueError, TypeError), err:
- raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
- str(err), errors.ECODE_INVAL)
- if self.op.candidate_pool_size < 1:
- raise errors.OpPrereqError("At least one master candidate needed",
- errors.ECODE_INVAL)
-
- _CheckBooleanOpField(self.op, "maintain_node_health")
-
if self.op.uid_pool:
uidpool.CheckUidPool(self.op.uid_pool)
"""
if self.op.vg_name is not None and not self.op.vg_name:
- instances = self.cfg.GetAllInstancesInfo().values()
- for inst in instances:
- for disk in inst.disks:
- if _RecursiveCheckIfLVMBased(disk):
- raise errors.OpPrereqError("Cannot disable lvm storage while"
- " lvm-based instances exist",
- errors.ECODE_INVAL)
+ if self.cfg.HasAnyDiskOfType(constants.LD_LV):
+ raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
+ " instances exist", errors.ECODE_INVAL)
+
+ if self.op.drbd_helper is not None and not self.op.drbd_helper:
+ if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
+ raise errors.OpPrereqError("Cannot disable drbd helper while"
+ " drbd-based instances exist",
+ errors.ECODE_INVAL)
node_list = self.acquired_locks[locking.LEVEL_NODE]
raise errors.OpPrereqError("Error on node '%s': %s" %
(node, vgstatus), errors.ECODE_ENVIRON)
+ if self.op.drbd_helper:
+ # checks given drbd helper on all nodes
+ helpers = self.rpc.call_drbd_helper(node_list)
+ for node in node_list:
+ ninfo = self.cfg.GetNodeInfo(node)
+ if ninfo.offline:
+ self.LogInfo("Not checking drbd helper on offline node %s", node)
+ continue
+ msg = helpers[node].fail_msg
+ if msg:
+ raise errors.OpPrereqError("Error checking drbd helper on node"
+ " '%s': %s" % (node, msg),
+ errors.ECODE_ENVIRON)
+ node_helper = helpers[node].payload
+ if node_helper != self.op.drbd_helper:
+ raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
+ (node, node_helper), errors.ECODE_ENVIRON)
+
self.cluster = cluster = self.cfg.GetClusterInfo()
# validate params changes
if self.op.beparams:
hv_class.CheckParameterSyntax(new_osp)
_CheckHVParams(self, node_list, hv_name, new_osp)
+ if self.op.default_iallocator:
+ alloc_script = utils.FindFile(self.op.default_iallocator,
+ constants.IALLOCATOR_SEARCH_PATH,
+ os.path.isfile)
+ if alloc_script is None:
+ raise errors.OpPrereqError("Invalid default iallocator script '%s'"
+ " specified" % self.op.default_iallocator,
+ errors.ECODE_INVAL)
def Exec(self, feedback_fn):
"""Change the parameters of the cluster.
else:
feedback_fn("Cluster LVM configuration already in desired"
" state, not changing")
+ if self.op.drbd_helper is not None:
+ new_helper = self.op.drbd_helper
+ if not new_helper:
+ new_helper = None
+ if new_helper != self.cfg.GetDRBDHelper():
+ self.cfg.SetDRBDHelper(new_helper)
+ else:
+ feedback_fn("Cluster DRBD helper already in desired state,"
+ " not changing")
if self.op.hvparams:
self.cluster.hvparams = self.new_hvparams
if self.op.os_hvp:
if self.op.uid_pool is not None:
self.cluster.uid_pool = self.op.uid_pool
+ if self.op.default_iallocator is not None:
+ self.cluster.default_iallocator = self.op.default_iallocator
+
self.cfg.Update(self.cluster, feedback_fn)
This is a very simple LU.
"""
- _OP_REQP = []
REQ_BGL = False
def ExpandNames(self):
"""Logical unit for OS diagnose/query.
"""
- _OP_REQP = [
- ("output_fields", _TListOf(_TNEString)),
- ("names", _TListOf(_TNEString)),
+ _OP_PARAMS = [
+ _POutputFields,
+ ("names", _EmptyList, _TListOf(_TNonEmptyString)),
]
REQ_BGL = False
_FIELDS_STATIC = utils.FieldSet()
"""
HPATH = "node-remove"
HTYPE = constants.HTYPE_NODE
- _OP_REQP = [("node_name", _TNEString)]
+ _OP_PARAMS = [
+ _PNodeName,
+ ]
def BuildHooksEnv(self):
"""Build hooks env.
"""
# pylint: disable-msg=W0142
- _OP_REQP = [
- ("output_fields", _TListOf(_TNEString)),
- ("names", _TListOf(_TNEString)),
- ("use_locking", _TBool),
+ _OP_PARAMS = [
+ _POutputFields,
+ ("names", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("use_locking", False, _TBool),
]
REQ_BGL = False
"""Logical unit for getting volumes on node(s).
"""
- _OP_REQP = [
- ("nodes", _TListOf(_TNEString)),
- ("output_fields", _TListOf(_TNEString)),
+ _OP_PARAMS = [
+ ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
]
REQ_BGL = False
_FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
"""
_FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
- _OP_REQP = [
- ("nodes", _TListOf(_TNEString)),
- ("storage_type", _CheckStorageType),
- ("output_fields", _TListOf(_TNEString)),
+ _OP_PARAMS = [
+ ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("storage_type", _NoDefault, _CheckStorageType),
+ ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
+ ("name", None, _TMaybeString),
]
- _OP_DEFS = [("name", None)]
REQ_BGL = False
def CheckArguments(self):
"""Logical unit for modifying a storage volume on a node.
"""
- _OP_REQP = [
- ("node_name", _TNEString),
- ("storage_type", _CheckStorageType),
- ("name", _TNEString),
- ("changes", _TDict),
+ _OP_PARAMS = [
+ _PNodeName,
+ ("storage_type", _NoDefault, _CheckStorageType),
+ ("name", _NoDefault, _TNonEmptyString),
+ ("changes", _NoDefault, _TDict),
]
REQ_BGL = False
"""
HPATH = "node-add"
HTYPE = constants.HTYPE_NODE
- _OP_REQP = [
- ("node_name", _TNEString),
+ _OP_PARAMS = [
+ _PNodeName,
+ ("primary_ip", None, _NoType),
+ ("secondary_ip", None, _TMaybeString),
+ ("readd", False, _TBool),
]
- _OP_DEFS = [("secondary_ip", None)]
def CheckArguments(self):
# validate/normalize the node name
- self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
+ self.op.node_name = netutils.HostInfo.NormalizeName(self.op.node_name)
def BuildHooksEnv(self):
"""Build hooks env.
node_name = self.op.node_name
cfg = self.cfg
- dns_data = utils.GetHostInfo(node_name)
+ dns_data = netutils.GetHostInfo(node_name)
node = dns_data.name
primary_ip = self.op.primary_ip = dns_data.ip
if self.op.secondary_ip is None:
self.op.secondary_ip = primary_ip
- if not utils.IsValidIP(self.op.secondary_ip):
+ if not netutils.IsValidIP4(self.op.secondary_ip):
raise errors.OpPrereqError("Invalid secondary IP given",
errors.ECODE_INVAL)
secondary_ip = self.op.secondary_ip
errors.ECODE_INVAL)
# checks reachability
- if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
+ if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("Node not reachable by ping",
errors.ECODE_ENVIRON)
if not newbie_singlehomed:
# check reachability from my secondary ip to newbie's secondary ip
- if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
+ if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
source=myself.secondary_ip):
raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
" based ping to noded port",
"""
HPATH = "node-modify"
HTYPE = constants.HTYPE_NODE
- _OP_REQP = [("node_name", _TNEString)]
+ _OP_PARAMS = [
+ _PNodeName,
+ ("master_candidate", None, _TMaybeBool),
+ ("offline", None, _TMaybeBool),
+ ("drained", None, _TMaybeBool),
+ ("auto_promote", False, _TBool),
+ _PForce,
+ ]
REQ_BGL = False
def CheckArguments(self):
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
- _CheckBooleanOpField(self.op, 'master_candidate')
- _CheckBooleanOpField(self.op, 'offline')
- _CheckBooleanOpField(self.op, 'drained')
- _CheckBooleanOpField(self.op, 'auto_promote')
all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
if all_mods.count(None) == 3:
raise errors.OpPrereqError("Please pass at least one modification",
"""Powercycles a node.
"""
- _OP_REQP = [
- ("node_name", _TNEString),
- ("force", _TBool),
+ _OP_PARAMS = [
+ _PNodeName,
+ _PForce,
]
REQ_BGL = False
"""Query cluster configuration.
"""
- _OP_REQP = []
REQ_BGL = False
def ExpandNames(self):
"candidate_pool_size": cluster.candidate_pool_size,
"master_netdev": cluster.master_netdev,
"volume_group_name": cluster.volume_group_name,
+ "drbd_usermode_helper": cluster.drbd_usermode_helper,
"file_storage_dir": cluster.file_storage_dir,
"maintain_node_health": cluster.maintain_node_health,
"ctime": cluster.ctime,
"uuid": cluster.uuid,
"tags": list(cluster.GetTags()),
"uid_pool": cluster.uid_pool,
+ "default_iallocator": cluster.default_iallocator,
}
return result
"""Return configuration values.
"""
- _OP_REQP = []
+ _OP_PARAMS = [_POutputFields]
REQ_BGL = False
_FIELDS_DYNAMIC = utils.FieldSet()
_FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
"""Bring up an instance's disks.
"""
- _OP_REQP = [("instance_name", _TNEString)]
- _OP_DEFS = [("ignore_size", False)]
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("ignore_size", False, _TBool),
+ ]
REQ_BGL = False
def ExpandNames(self):
"""Shutdown an instance's disks.
"""
- _OP_REQP = [("instance_name", _TNEString)]
+ _OP_PARAMS = [
+ _PInstanceName,
+ ]
REQ_BGL = False
def ExpandNames(self):
"""
HPATH = "instance-start"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("force", _TBool),
- ("beparams", _TDict),
- ("hvparams", _TDict),
- ]
- _OP_DEFS = [
- ("beparams", _EmptyDict),
- ("hvparams", _EmptyDict),
+ _OP_PARAMS = [
+ _PInstanceName,
+ _PForce,
+ ("hvparams", _EmptyDict, _TDict),
+ ("beparams", _EmptyDict, _TDict),
]
REQ_BGL = False
"""
HPATH = "instance-reboot"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("ignore_secondaries", _TBool),
- ("reboot_type", _TElemOf(constants.REBOOT_TYPES)),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("ignore_secondaries", False, _TBool),
+ ("reboot_type", _NoDefault, _TElemOf(constants.REBOOT_TYPES)),
+ _PShutdownTimeout,
]
- _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
REQ_BGL = False
def ExpandNames(self):
"""
HPATH = "instance-stop"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [("instance_name", _TNEString)]
- _OP_DEFS = [("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, _TPositiveInt),
+ ]
REQ_BGL = False
def ExpandNames(self):
"""
HPATH = "instance-reinstall"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [("instance_name", _TNEString)]
- _OP_DEFS = [
- ("os_type", None),
- ("force_variant", False),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("os_type", None, _TMaybeString),
+ ("force_variant", False, _TBool),
]
REQ_BGL = False
"""
HPATH = "instance-recreate-disks"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("disks", _TListOf(_TPInt)),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("disks", _EmptyList, _TListOf(_TPositiveInt)),
]
REQ_BGL = False
"""
HPATH = "instance-rename"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("new_name", _TNEString),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("new_name", _NoDefault, _TNonEmptyString),
+ ("ignore_ip", False, _TBool),
+ ("check_name", True, _TBool),
]
- _OP_DEFS = [("ignore_ip", False)]
def BuildHooksEnv(self):
"""Build hooks env.
self.instance = instance
# new name verification
- name_info = utils.GetHostInfo(self.op.new_name)
+ if self.op.check_name:
+ name_info = netutils.GetHostInfo(self.op.new_name)
+ self.op.new_name = name_info.name
+
+ new_name = self.op.new_name
- self.op.new_name = new_name = name_info.name
instance_list = self.cfg.GetInstanceList()
if new_name in instance_list:
raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
new_name, errors.ECODE_EXISTS)
if not self.op.ignore_ip:
- if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
+ if netutils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("IP %s of instance %s already in use" %
(name_info.ip, new_name),
errors.ECODE_NOTUNIQUE)
"""
HPATH = "instance-remove"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("ignore_failures", _TBool),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("ignore_failures", False, _TBool),
+ _PShutdownTimeout,
]
- _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
REQ_BGL = False
def ExpandNames(self):
"""
# pylint: disable-msg=W0142
- _OP_REQP = [
- ("output_fields", _TListOf(_TNEString)),
- ("names", _TListOf(_TNEString)),
- ("use_locking", _TBool),
+ _OP_PARAMS = [
+ ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
+ ("names", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("use_locking", False, _TBool),
]
REQ_BGL = False
_SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
if name not in constants.HVC_GLOBALS] +
["be/%s" % name
for name in constants.BES_PARAMETERS])
- _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
+ _FIELDS_DYNAMIC = utils.FieldSet("oper_state",
+ "oper_ram",
+ "oper_vcpus",
+ "status")
def CheckArguments(self):
val = live_data[instance.name].get("memory", "?")
else:
val = "-"
+ elif field == "oper_vcpus":
+ if instance.primary_node in bad_nodes:
+ val = None
+ elif instance.name in live_data:
+ val = live_data[instance.name].get("vcpus", "?")
+ else:
+ val = "-"
elif field == "vcpus":
val = i_be[constants.BE_VCPUS]
elif field == "disk_template":
"""
HPATH = "instance-failover"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("ignore_consistency", _TBool),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("ignore_consistency", False, _TBool),
+ _PShutdownTimeout,
]
- _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
REQ_BGL = False
def ExpandNames(self):
"""
HPATH = "instance-migrate"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("live", _TBool),
- ("cleanup", _TBool),
+ _OP_PARAMS = [
+ _PInstanceName,
+ _PMigrationLive,
+ ("cleanup", False, _TBool),
]
REQ_BGL = False
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
self._migrater = TLMigrateInstance(self, self.op.instance_name,
- self.op.live, self.op.cleanup)
+ self.op.cleanup)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
"""
HPATH = "instance-move"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("target_node", _TNEString),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("target_node", _NoDefault, _TNonEmptyString),
+ _PShutdownTimeout,
]
- _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
REQ_BGL = False
def ExpandNames(self):
"""
HPATH = "node-migrate"
HTYPE = constants.HTYPE_NODE
- _OP_REQP = [
- ("node_name", _TNEString),
- ("live", _TBool),
+ _OP_PARAMS = [
+ _PNodeName,
+ _PMigrationLive,
]
REQ_BGL = False
logging.debug("Migrating instance %s", inst.name)
names.append(inst.name)
- tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
+ tasklets.append(TLMigrateInstance(self, inst.name, False))
self.tasklets = tasklets
class TLMigrateInstance(Tasklet):
- def __init__(self, lu, instance_name, live, cleanup):
+ def __init__(self, lu, instance_name, cleanup):
"""Initializes this class.
"""
# Parameters
self.instance_name = instance_name
- self.live = live
self.cleanup = cleanup
+ self.live = False # will be overridden later
def CheckPrereq(self):
"""Check prerequisites.
self.instance = instance
+ if self.lu.op.live is None:
+ # read the default value from the hypervisor
+ i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
+ self.lu.op.live = i_hv[constants.HV_MIGRATION_TYPE]
+
+ self.live = self.lu.op.live == constants.HT_MIGRATION_LIVE
+
def _WaitUntilSync(self):
"""Poll with custom rpc for disk sync.
"""
HPATH = "instance-add"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("mode", _TElemOf(constants.INSTANCE_CREATE_MODES)),
- ("start", _TBool),
- ("wait_for_sync", _TBool),
- ("ip_check", _TBool),
- ("disks", _TListOf(_TDict)),
- ("nics", _TListOf(_TDict)),
- ("hvparams", _TDict),
- ("beparams", _TDict),
- ("osparams", _TDict),
- ]
- _OP_DEFS = [
- ("name_check", True),
- ("no_install", False),
- ("os_type", None),
- ("force_variant", False),
- ("source_handshake", None),
- ("source_x509_ca", None),
- ("source_instance_name", None),
- ("src_node", None),
- ("src_path", None),
- ("pnode", None),
- ("snode", None),
- ("iallocator", None),
- ("hypervisor", None),
- ("disk_template", None),
- ("identify_defaults", None),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("mode", _NoDefault, _TElemOf(constants.INSTANCE_CREATE_MODES)),
+ ("start", True, _TBool),
+ ("wait_for_sync", True, _TBool),
+ ("ip_check", True, _TBool),
+ ("name_check", True, _TBool),
+ ("disks", _NoDefault, _TListOf(_TDict)),
+ ("nics", _NoDefault, _TListOf(_TDict)),
+ ("hvparams", _EmptyDict, _TDict),
+ ("beparams", _EmptyDict, _TDict),
+ ("osparams", _EmptyDict, _TDict),
+ ("no_install", None, _TMaybeBool),
+ ("os_type", None, _TMaybeString),
+ ("force_variant", False, _TBool),
+ ("source_handshake", None, _TOr(_TList, _TNone)),
+ ("source_x509_ca", None, _TOr(_TList, _TNone)),
+ ("source_instance_name", None, _TMaybeString),
+ ("src_node", None, _TMaybeString),
+ ("src_path", None, _TMaybeString),
+ ("pnode", None, _TMaybeString),
+ ("snode", None, _TMaybeString),
+ ("iallocator", None, _TMaybeString),
+ ("hypervisor", None, _TMaybeString),
+ ("disk_template", _NoDefault, _CheckDiskTemplate),
+ ("identify_defaults", False, _TBool),
+ ("file_driver", None, _TOr(_TNone, _TElemOf(constants.FILE_DRIVER))),
+ ("file_storage_dir", None, _TMaybeString),
+ ("dry_run", False, _TBool),
]
REQ_BGL = False
self.LogInfo("No-installation mode selected, disabling startup")
self.op.start = False
# validate/normalize the instance name
- self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
+ self.op.instance_name = \
+ netutils.HostInfo.NormalizeName(self.op.instance_name)
+
if self.op.ip_check and not self.op.name_check:
# TODO: make the ip check more flexible and not depend on the name check
raise errors.OpPrereqError("Cannot do ip checks without a name check",
raise errors.OpPrereqError("Either all disks are adopted or none is",
errors.ECODE_INVAL)
if has_adopt:
- if self.op.disk_template != constants.DT_PLAIN:
- raise errors.OpPrereqError("Disk adoption is only supported for the"
- " 'plain' disk template",
+ if self.op.disk_template not in constants.DTS_MAY_ADOPT:
+ raise errors.OpPrereqError("Disk adoption is not supported for the"
+ " '%s' disk template" %
+ self.op.disk_template,
errors.ECODE_INVAL)
if self.op.iallocator is not None:
raise errors.OpPrereqError("Disk adoption not allowed with an"
# instance name verification
if self.op.name_check:
- self.hostname1 = utils.GetHostInfo(self.op.instance_name)
+ self.hostname1 = netutils.GetHostInfo(self.op.instance_name)
self.op.instance_name = self.hostname1.name
# used in CheckPrereq for ip ping check
self.check_ip = self.hostname1.ip
errors.ECODE_INVAL)
### Node/iallocator related checks
- if [self.op.iallocator, self.op.pnode].count(None) != 1:
- raise errors.OpPrereqError("One and only one of iallocator and primary"
- " node must be given",
- errors.ECODE_INVAL)
+ _CheckIAllocatorOrNode(self, "iallocator", "pnode")
self._cds = _GetClusterDomainSecret()
raise errors.OpPrereqError("Missing source instance name",
errors.ECODE_INVAL)
- self.source_instance_name = \
- utils.GetHostInfo(utils.HostInfo.NormalizeName(src_instance_name)).name
+ norm_name = netutils.HostInfo.NormalizeName(src_instance_name)
+ self.source_instance_name = netutils.GetHostInfo(norm_name).name
else:
raise errors.OpPrereqError("Invalid instance creation mode %r" %
errors.ECODE_INVAL)
nic_ip = self.hostname1.ip
else:
- if not utils.IsValidIP(ip):
+ if not netutils.IsValidIP4(ip):
raise errors.OpPrereqError("Given IP address '%s' doesn't look"
" like a valid IP" % ip,
errors.ECODE_INVAL)
# ip ping checks (we use the same ip that was resolved in ExpandNames)
if self.op.ip_check:
- if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
+ if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("IP %s of instance %s already in use" %
(self.check_ip, self.op.instance_name),
errors.ECODE_NOTUNIQUE)
console.
"""
- _OP_REQP = [("instance_name", _TNEString)]
+ _OP_PARAMS = [
+ _PInstanceName
+ ]
REQ_BGL = False
def ExpandNames(self):
"""
HPATH = "mirrors-replace"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("mode", _TElemOf(constants.REPLACE_MODES)),
- ("disks", _TListOf(_TPInt)),
- ]
- _OP_DEFS = [
- ("remote_node", None),
- ("iallocator", None),
- ("early_release", None),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("mode", _NoDefault, _TElemOf(constants.REPLACE_MODES)),
+ ("disks", _EmptyList, _TListOf(_TPositiveInt)),
+ ("remote_node", None, _TMaybeString),
+ ("iallocator", None, _TMaybeString),
+ ("early_release", False, _TBool),
]
REQ_BGL = False
"""Repairs the volume group on a node.
"""
- _OP_REQP = [("node_name", _TNEString)]
+ _OP_PARAMS = [
+ _PNodeName,
+ ("storage_type", _NoDefault, _CheckStorageType),
+ ("name", _NoDefault, _TNonEmptyString),
+ ("ignore_consistency", False, _TBool),
+ ]
REQ_BGL = False
def CheckArguments(self):
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
- _CheckStorageType(self.op.storage_type)
-
storage_type = self.op.storage_type
if (constants.SO_FIX_CONSISTENCY not in
"""Computes the node evacuation strategy.
"""
- _OP_REQP = [("nodes", _TListOf(_TNEString))]
- _OP_DEFS = [
- ("remote_node", None),
- ("iallocator", None),
+ _OP_PARAMS = [
+ ("nodes", _NoDefault, _TListOf(_TNonEmptyString)),
+ ("remote_node", None, _TMaybeString),
+ ("iallocator", None, _TMaybeString),
]
REQ_BGL = False
def CheckArguments(self):
- if self.op.remote_node is not None and self.op.iallocator is not None:
- raise errors.OpPrereqError("Give either the iallocator or the new"
- " secondary, not both", errors.ECODE_INVAL)
+ _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
def ExpandNames(self):
self.op.nodes = _GetWantedNodes(self, self.op.nodes)
"""
HPATH = "disk-grow"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("disk", _TInt),
- ("amount", _TInt),
- ("wait_for_sync", _TBool),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("disk", _NoDefault, _TInt),
+ ("amount", _NoDefault, _TInt),
+ ("wait_for_sync", True, _TBool),
]
REQ_BGL = False
"""Query runtime instance data.
"""
- _OP_REQP = [
- ("instances", _TListOf(_TNEString)),
- ("static", _TBool),
+ _OP_PARAMS = [
+ ("instances", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("static", False, _TBool),
]
REQ_BGL = False
"""
HPATH = "instance-modify"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [("instance_name", _TNEString)]
- _OP_DEFS = [
- ("nics", _EmptyList),
- ("disks", _EmptyList),
- ("beparams", _EmptyDict),
- ("hvparams", _EmptyDict),
- ("disk_template", None),
- ("remote_node", None),
- ("os_name", None),
- ("force_variant", False),
- ("osparams", None),
- ("force", False),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("nics", _EmptyList, _TList),
+ ("disks", _EmptyList, _TList),
+ ("beparams", _EmptyDict, _TDict),
+ ("hvparams", _EmptyDict, _TDict),
+ ("disk_template", None, _TMaybeString),
+ ("remote_node", None, _TMaybeString),
+ ("os_name", None, _TMaybeString),
+ ("force_variant", False, _TBool),
+ ("osparams", None, _TOr(_TDict, _TNone)),
+ _PForce,
]
REQ_BGL = False
if nic_ip.lower() == constants.VALUE_NONE:
nic_dict['ip'] = None
else:
- if not utils.IsValidIP(nic_ip):
+ if not netutils.IsValidIP4(nic_ip):
raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
errors.ECODE_INVAL)
" %s to %s" % (instance.disk_template,
self.op.disk_template),
errors.ECODE_INVAL)
+ _CheckInstanceDown(self, instance, "cannot change disk template")
if self.op.disk_template in constants.DTS_NET_MIRROR:
_CheckNodeOnline(self, self.op.remote_node)
_CheckNodeNotDrained(self, self.op.remote_node)
disks = [{"size": d.size} for d in instance.disks]
required = _ComputeDiskSize(self.op.disk_template, disks)
_CheckNodesFreeDisk(self, [self.op.remote_node], required)
- _CheckInstanceDown(self, instance, "cannot change disk template")
# hvparams processing
if self.op.hvparams:
"""Query the exports list
"""
- _OP_REQP = [("nodes", _TListOf(_TNEString))]
+ _OP_PARAMS = [
+ ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("use_locking", False, _TBool),
+ ]
REQ_BGL = False
def ExpandNames(self):
"""Prepares an instance for an export and returns useful information.
"""
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("mode", _TElemOf(constants.EXPORT_MODES)),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("mode", _NoDefault, _TElemOf(constants.EXPORT_MODES)),
]
REQ_BGL = False
"""
HPATH = "instance-export"
HTYPE = constants.HTYPE_INSTANCE
- _OP_REQP = [
- ("instance_name", _TNEString),
- ("target_node", _TNEString),
- ("shutdown", _TBool),
- ("mode", _TElemOf(constants.EXPORT_MODES)),
- ]
- _OP_DEFS = [
- ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT),
- ("remove_instance", False),
- ("ignore_remove_failures", False),
- ("mode", constants.EXPORT_MODE_LOCAL),
- ("x509_key_name", None),
- ("destination_x509_ca", None),
+ _OP_PARAMS = [
+ _PInstanceName,
+ ("target_node", _NoDefault, _TOr(_TNonEmptyString, _TList)),
+ ("shutdown", True, _TBool),
+ _PShutdownTimeout,
+ ("remove_instance", False, _TBool),
+ ("ignore_remove_failures", False, _TBool),
+ ("mode", constants.EXPORT_MODE_LOCAL, _TElemOf(constants.EXPORT_MODES)),
+ ("x509_key_name", None, _TOr(_TList, _TNone)),
+ ("destination_x509_ca", None, _TMaybeString),
]
REQ_BGL = False
feedback_fn("Deactivating disks for %s" % instance.name)
_ShutdownInstanceDisks(self, instance)
+ if not (compat.all(dresults) and fin_resu):
+ failures = []
+ if not fin_resu:
+ failures.append("export finalization")
+ if not compat.all(dresults):
+ fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
+ if not dsk)
+ failures.append("disk export: disk(s) %s" % fdsk)
+
+ raise errors.OpExecError("Export failed, errors in %s" %
+ utils.CommaJoin(failures))
+
+ # At this point, the export was successful, we can cleanup/finish
+
# Remove instance if requested
if self.op.remove_instance:
- if not (compat.all(dresults) and fin_resu):
- feedback_fn("Not removing instance %s as parts of the export failed" %
- instance.name)
- else:
- feedback_fn("Removing instance %s" % instance.name)
- _RemoveInstance(self, feedback_fn, instance,
- self.op.ignore_remove_failures)
+ feedback_fn("Removing instance %s" % instance.name)
+ _RemoveInstance(self, feedback_fn, instance,
+ self.op.ignore_remove_failures)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
self._CleanupExports(feedback_fn)
"""Remove exports related to the named instance.
"""
- _OP_REQP = [("instance_name", _TNEString)]
+ _OP_PARAMS = [
+ _PInstanceName,
+ ]
REQ_BGL = False
def ExpandNames(self):
"""Returns the tags of a given object.
"""
- _OP_REQP = [
- ("kind", _TElemOf(constants.VALID_TAG_TYPES)),
- ("name", _TNEString),
+ _OP_PARAMS = [
+ ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
+ ("name", _NoDefault, _TNonEmptyString),
]
REQ_BGL = False
"""Searches the tags for a given pattern.
"""
- _OP_REQP = [("pattern", _TNEString)]
+ _OP_PARAMS = [
+ ("pattern", _NoDefault, _TNonEmptyString),
+ ]
REQ_BGL = False
def ExpandNames(self):
"""Sets a tag on a given object.
"""
- _OP_REQP = [
- ("kind", _TElemOf(constants.VALID_TAG_TYPES)),
- ("name", _TNEString),
- ("tags", _TListOf(objects.TaggableObject.ValidateTag)),
+ _OP_PARAMS = [
+ ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
+ ("name", _NoDefault, _TNonEmptyString),
+ ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
]
REQ_BGL = False
"""Delete a list of tags from a given object.
"""
- _OP_REQP = [
- ("kind", _TElemOf(constants.VALID_TAG_TYPES)),
- ("name", _TNEString),
- ("tags", _TListOf(objects.TaggableObject.ValidateTag)),
+ _OP_PARAMS = [
+ ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
+ ("name", _NoDefault, _TNonEmptyString),
+ ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
]
REQ_BGL = False
time.
"""
- _OP_REQP = [
- ("duration", _TFloat),
- ("on_master", _TBool),
- ("on_nodes", _TListOf(_TNEString)),
+ _OP_PARAMS = [
+ ("duration", _NoDefault, _TFloat),
+ ("on_master", True, _TBool),
+ ("on_nodes", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("repeat", 0, _TPositiveInt)
]
REQ_BGL = False
- def CheckArguments(self):
- # TODO: convert to the type system
- self.op.repeat = getattr(self.op, "repeat", 0)
- if self.op.repeat < 0:
- raise errors.OpPrereqError("Repetition count cannot be negative")
-
def ExpandNames(self):
"""Expand names and set required locks.
self._TestDelay()
+class LUTestJobqueue(NoHooksLU):
+  """Utility LU to test some aspects of the job queue.
+
+  Used only by the test suite: it can rendezvous with an external test
+  client over a Unix socket at the expand-names and exec phases, emit log
+  messages on request, and fail on request.
+
+  """
+  _OP_PARAMS = [
+    ("notify_waitlock", False, _TBool),
+    ("notify_exec", False, _TBool),
+    ("log_messages", _EmptyList, _TListOf(_TString)),
+    ("fail", False, _TBool),
+    ]
+  REQ_BGL = False
+
+  # Must be lower than default timeout for WaitForJobChange to see whether it
+  # notices changed jobs
+  _CLIENT_CONNECT_TIMEOUT = 20.0
+  _CLIENT_CONFIRM_TIMEOUT = 60.0
+
+  @classmethod
+  def _NotifyUsingSocket(cls, cb, errcls):
+    """Opens a Unix socket and waits for another program to connect.
+
+    The socket path is handed to the client via C{cb}; the client must
+    connect within C{_CLIENT_CONNECT_TIMEOUT} and then close its end
+    within C{_CLIENT_CONFIRM_TIMEOUT} to confirm the notification.
+
+    @type cb: callable
+    @param cb: Callback to send socket name to client
+    @type errcls: class
+    @param errcls: Exception class to use for errors
+
+    """
+    # Using a temporary directory as there's no easy way to create temporary
+    # sockets without writing a custom loop around tempfile.mktemp and
+    # socket.bind
+    tmpdir = tempfile.mkdtemp()
+    try:
+      tmpsock = utils.PathJoin(tmpdir, "sock")
+
+      logging.debug("Creating temporary socket at %s", tmpsock)
+      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+      try:
+        sock.bind(tmpsock)
+        sock.listen(1)
+
+        # Send details to client
+        cb(tmpsock)
+
+        # Wait for client to connect before continuing
+        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
+        try:
+          (conn, _) = sock.accept()
+        except socket.error, err:
+          raise errcls("Client didn't connect in time (%s)" % err)
+      finally:
+        # The listening socket is no longer needed once the client has
+        # connected (or the accept failed)
+        sock.close()
+    finally:
+      # Remove as soon as client is connected; the accepted connection
+      # stays usable after the filesystem path is gone
+      shutil.rmtree(tmpdir)
+
+    # Wait for client to close
+    try:
+      try:
+        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
+        # recv returns when the client sends data or closes its end;
+        # either counts as confirmation here
+        conn.recv(1)
+      except socket.error, err:
+        raise errcls("Client failed to confirm notification (%s)" % err)
+    finally:
+      conn.close()
+
+  def _SendNotification(self, test, arg, sockname):
+    """Sends a notification to the client.
+
+    The socket path travels to the client through the job log, using the
+    C{ELOG_JQUEUE_TEST} message type.
+
+    @type test: string
+    @param test: Test name
+    @param arg: Test argument (depends on test)
+    @type sockname: string
+    @param sockname: Socket path
+
+    """
+    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
+
+  def _Notify(self, prereq, test, arg):
+    """Notifies the client of a test.
+
+    @type prereq: bool
+    @param prereq: Whether this is a prereq-phase test
+    @type test: string
+    @param test: Test name
+    @param arg: Test argument (depends on test)
+
+    """
+    if prereq:
+      errcls = errors.OpPrereqError
+    else:
+      errcls = errors.OpExecError
+
+    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
+                                                  test, arg),
+                                   errcls)
+
+  def CheckArguments(self):
+    # Count invocations so the later phases can verify the LU lifecycle
+    # (CheckArguments may run more than once, e.g. on job requeue)
+    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
+    self.expandnames_calls = 0
+
+  def ExpandNames(self):
+    checkargs_calls = getattr(self, "checkargs_calls", 0)
+    if checkargs_calls < 1:
+      raise errors.ProgrammerError("CheckArguments was not called")
+
+    self.expandnames_calls += 1
+
+    if self.op.notify_waitlock:
+      self._Notify(True, constants.JQT_EXPANDNAMES, None)
+
+    self.LogInfo("Expanding names")
+
+    # Get lock on master node (just to get a lock, not for a particular reason)
+    self.needed_locks = {
+      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
+      }
+
+  def Exec(self, feedback_fn):
+    if self.expandnames_calls < 1:
+      raise errors.ProgrammerError("ExpandNames was not called")
+
+    if self.op.notify_exec:
+      self._Notify(False, constants.JQT_EXEC, None)
+
+    self.LogInfo("Executing")
+
+    if self.op.log_messages:
+      for idx, msg in enumerate(self.op.log_messages):
+        self.LogInfo("Sending log message %s", idx + 1)
+        feedback_fn(constants.JQT_MSGPREFIX + msg)
+      # Report how many test messages have been sent
+      # (idx holds the last loop index, so idx + 1 is the count)
+      self._Notify(False, constants.JQT_LOGMSG, idx + 1)
+
+    if self.op.fail:
+      raise errors.OpExecError("Opcode failure was requested")
+
+    return True
+
+
class IAllocator(object):
"""IAllocator framework.
This LU runs the allocator tests
"""
- _OP_REQP = [
- ("direction", _TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)),
- ("mode", _TElemOf(constants.VALID_IALLOCATOR_MODES)),
- ("name", _TNEString),
- ("nics", _TOr(_TNone, _TListOf(
- _TDictOf(_TElemOf(["mac", "ip", "bridge"]), _TNEString)))),
- ("disks", _TOr(_TNone, _TList)),
- ]
- _OP_DEFS = [
- ("hypervisor", None),
- ("allocator", None),
- ("nics", None),
- ("disks", None),
+ _OP_PARAMS = [
+ ("direction", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)),
+ ("mode", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_MODES)),
+ ("name", _NoDefault, _TNonEmptyString),
+ ("nics", _NoDefault, _TOr(_TNone, _TListOf(
+ _TDictOf(_TElemOf(["mac", "ip", "bridge"]),
+ _TOr(_TNone, _TNonEmptyString))))),
+ ("disks", _NoDefault, _TOr(_TNone, _TList)),
+ ("hypervisor", None, _TMaybeString),
+ ("allocator", None, _TMaybeString),
+ ("tags", _EmptyList, _TListOf(_TNonEmptyString)),
+ ("mem_size", None, _TOr(_TNone, _TPositiveInt)),
+ ("vcpus", None, _TOr(_TNone, _TPositiveInt)),
+ ("os", None, _TMaybeString),
+ ("disk_template", None, _TMaybeString),
+ ("evac_nodes", None, _TOr(_TNone, _TListOf(_TNonEmptyString))),
]
def CheckPrereq(self):