"""Module implementing the master-side code."""
-# pylint: disable-msg=W0613,W0201
+# pylint: disable-msg=W0201
+
+# W0201 since most LU attributes are defined in CheckPrereq or similar
+# functions
import os
import os.path
import platform
import logging
import copy
+import OpenSSL
from ganeti import ssh
from ganeti import utils
self.recalculate_locks = {}
self.__ssh = None
# logging
- self.LogWarning = processor.LogWarning
- self.LogInfo = processor.LogInfo
- self.LogStep = processor.LogStep
+ self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
+ self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
+ self.LogStep = processor.LogStep # pylint: disable-msg=C0103
# support for dry-run
self.dry_run_result = None
+ # support for generic debug attribute
+ if (not hasattr(self.op, "debug_level") or
+ not isinstance(self.op.debug_level, int)):
+ self.op.debug_level = 0
# Tasklets
self.tasklets = None
and hook results
"""
+  # API must be kept, thus we ignore the unused-argument and
+  # could-be-a-function warnings
+ # pylint: disable-msg=W0613,R0201
return lu_result
def _ExpandAndLockInstance(self):
else:
assert locking.LEVEL_INSTANCE not in self.needed_locks, \
"_ExpandAndLockInstance called with instance-level locks set"
- expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
- if expanded_name is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name, errors.ECODE_NOENT)
- self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
- self.op.instance_name = expanded_name
+ self.op.instance_name = _ExpandInstanceName(self.cfg,
+ self.op.instance_name)
+ self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
def _LockInstancesNodes(self, primary_only=False):
"""Helper function to declare instances' nodes for locking.
del self.recalculate_locks[locking.LEVEL_NODE]
-class NoHooksLU(LogicalUnit):
+class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
"""Simple LU which runs no hooks.
This LU is intended as a parent for other LogicalUnits which will
HPATH = None
HTYPE = None
+ def BuildHooksEnv(self):
+ """Empty BuildHooksEnv for NoHooksLu.
+
+ This just raises an error.
+
+ """
+ assert False, "BuildHooksEnv called for NoHooksLUs"
+
class Tasklet:
"""Tasklet base class.
@param nodes: list of node names or None for all nodes
@rtype: list
@return: the list of nodes, sorted
- @raise errors.OpProgrammerError: if the nodes parameter is wrong type
+ @raise errors.ProgrammerError: if the nodes parameter is wrong type
"""
if not isinstance(nodes, list):
raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
" non-empty list of nodes whose name is to be expanded.")
- wanted = []
- for name in nodes:
- node = lu.cfg.ExpandNodeName(name)
- if node is None:
- raise errors.OpPrereqError("No such node name '%s'" % name,
- errors.ECODE_NOENT)
- wanted.append(node)
-
+ wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
return utils.NiceSort(wanted)
errors.ECODE_INVAL)
if instances:
- wanted = []
-
- for name in instances:
- instance = lu.cfg.ExpandInstanceName(name)
- if instance is None:
- raise errors.OpPrereqError("No such instance name '%s'" % name,
- errors.ECODE_NOENT)
- wanted.append(instance)
-
+ wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
else:
wanted = utils.NiceSort(lu.cfg.GetInstanceList())
return wanted
if used_globals:
msg = ("The following hypervisor parameters are global and cannot"
" be customized at instance level, please modify them at"
- " cluster level: %s" % ", ".join(used_globals))
+ " cluster level: %s" % utils.CommaJoin(used_globals))
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
errors.ECODE_INVAL)
+def _CheckNodeHasOS(lu, node, os_name, force_variant):
+ """Ensure that a node supports a given OS.
+
+ @param lu: the LU on behalf of which we make the check
+ @param node: the node to check
+ @param os_name: the OS to query about
+ @param force_variant: whether to ignore variant errors
+  @raise errors.OpPrereqError: if the node does not support the OS
+
+ """
+ result = lu.rpc.call_os_get(node, os_name)
+ result.Raise("OS '%s' not in supported OS list for node %s" %
+ (os_name, node),
+ prereq=True, ecode=errors.ECODE_INVAL)
+ if not force_variant:
+ _CheckOSVariant(result.payload, os_name)
+
+
+def _CheckDiskTemplate(template):
+ """Ensure a given disk template is valid.
+
+ """
+ if template not in constants.DISK_TEMPLATES:
+ msg = ("Invalid disk template name '%s', valid templates are: %s" %
+ (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+
+def _CheckInstanceDown(lu, instance, reason):
+ """Ensure that an instance is not running."""
+ if instance.admin_up:
+ raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
+ (instance.name, reason), errors.ECODE_STATE)
+
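+  # the configuration says the instance is down; additionally ask the
+  # primary node's hypervisor whether it is actually running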
+ pnode = instance.primary_node
+ ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
+ ins_l.Raise("Can't contact node %s for instance information" % pnode,
+ prereq=True, ecode=errors.ECODE_ENVIRON)
+
+ if instance.name in ins_l.payload:
+ raise errors.OpPrereqError("Instance %s is running, %s" %
+ (instance.name, reason), errors.ECODE_STATE)
+
+
+def _ExpandItemName(fn, name, kind):
+ """Expand an item name.
+
+ @param fn: the function to use for expansion
+ @param name: requested item name
+ @param kind: text description ('Node' or 'Instance')
+ @return: the resolved (full) name
+ @raise errors.OpPrereqError: if the item is not found
+
+ """
+ full_name = fn(name)
+ if full_name is None:
+ raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
+ errors.ECODE_NOENT)
+ return full_name
+
+
+def _ExpandNodeName(cfg, name):
+ """Wrapper over L{_ExpandItemName} for nodes."""
+ return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
+
+
+def _ExpandInstanceName(cfg, name):
+ """Wrapper over L{_ExpandItemName} for instance."""
+ return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
+
+
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
memory, vcpus, nics, disk_template, disks,
bep, hvp, hypervisor_name):
}
if override:
args.update(override)
- return _BuildInstanceHookEnv(**args)
+ return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
def _AdjustCandidatePool(lu, exceptions):
mod_list = lu.cfg.MaintainCandidatePool(exceptions)
if mod_list:
lu.LogInfo("Promoted nodes to master candidate role: %s",
- ", ".join(node.name for node in mod_list))
+ utils.CommaJoin(node.name for node in mod_list))
for name in mod_list:
lu.context.ReaddNode(name)
mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
return faulty
+def _FormatTimestamp(secs):
+ """Formats a Unix timestamp with the local timezone.
+
+ """
+  return time.strftime("%F %T %Z", time.localtime(secs))
+
+
class LUPostInitCluster(LogicalUnit):
"""Logical unit for running hooks after cluster initialization.
try:
hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
except:
+ # pylint: disable-msg=W0702
self.LogWarning("Errors occurred running hooks on %s" % master)
result = self.rpc.call_node_stop_master(master, False)
return master
+def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
+ warn_days=constants.SSL_CERT_EXPIRATION_WARN,
+ error_days=constants.SSL_CERT_EXPIRATION_ERROR):
+ """Verifies certificate details for LUVerifyCluster.
+
+ """
+ if expired:
+ msg = "Certificate %s is expired" % filename
+
+ if not_before is not None and not_after is not None:
+ msg += (" (valid from %s to %s)" %
+ (_FormatTimestamp(not_before),
+ _FormatTimestamp(not_after)))
+ elif not_before is not None:
+ msg += " (valid from %s)" % _FormatTimestamp(not_before)
+ elif not_after is not None:
+ msg += " (valid until %s)" % _FormatTimestamp(not_after)
+
+ return (LUVerifyCluster.ETYPE_ERROR, msg)
+
+ elif not_before is not None and not_before > now:
+ return (LUVerifyCluster.ETYPE_WARNING,
+ "Certificate %s not yet valid (valid from %s)" %
+ (filename, _FormatTimestamp(not_before)))
+
+ elif not_after is not None:
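+    # int() truncates toward zero, so e.g. 1.9 days of remaining validity
+    # are reported as 1 day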
+ remaining_days = int((not_after - now) / (24 * 3600))
+
+ msg = ("Certificate %s expires in %d days" % (filename, remaining_days))
+
+ if remaining_days <= error_days:
+ return (LUVerifyCluster.ETYPE_ERROR, msg)
+
+ if remaining_days <= warn_days:
+ return (LUVerifyCluster.ETYPE_WARNING, msg)
+
+ return (None, None)
+
+
+def _VerifyCertificate(filename):
+ """Verifies a certificate for LUVerifyCluster.
+
+ @type filename: string
+ @param filename: Path to PEM file
+
+ """
+ try:
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
+ utils.ReadFile(filename))
+ except Exception, err: # pylint: disable-msg=W0703
+ return (LUVerifyCluster.ETYPE_ERROR,
+ "Failed to load X509 certificate %s: %s" % (filename, err))
+
+ # Depending on the pyOpenSSL version, this can just return (None, None)
+ (not_before, not_after) = utils.GetX509CertValidity(cert)
+
+ return _VerifyCertificateInner(filename, cert.has_expired(),
+ not_before, not_after, time.time())
+
+
class LUVerifyCluster(LogicalUnit):
"""Verifies the cluster status.
TINSTANCE = "instance"
ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
+ ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
ENODESSH = (TNODE, "ENODESSH")
ENODEVERSION = (TNODE, "ENODEVERSION")
ENODESETUP = (TNODE, "ENODESETUP")
+ ENODETIME = (TNODE, "ENODETIME")
ETYPE_FIELD = "code"
ETYPE_ERROR = "ERROR"
"""
node = nodeinfo.name
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
# main result, node_result should be a non-empty dict
test = not node_result or not isinstance(node_result, dict)
# check that ':' is not present in PV names, since it's a
# special character for lvcreate (denotes the range of PEs to
# use on the PV)
- for size, pvname, owner_vg in pvlist:
+ for _, pvname, owner_vg in pvlist:
test = ":" in pvname
_ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
" '%s' of VG '%s'", pvname, owner_vg)
available on the instance's node.
"""
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
node_current = instanceconfig.primary_node
node_vol_should = {}
"""
self.bad = False
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
verbose = self.op.verbose
self._feedback_fn = feedback_fn
feedback_fn("* Verifying global settings")
for msg in self.cfg.VerifyConfig():
_ErrorIf(True, self.ECLUSTERCFG, None, msg)
+ # Check the cluster certificates
+ for cert_filename in constants.ALL_CERT_FILES:
+ (errcode, msg) = _VerifyCertificate(cert_filename)
+ _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
+
vg_name = self.cfg.GetVGName()
hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
nodelist = utils.NiceSort(self.cfg.GetNodeList())
master_files = [constants.CLUSTER_CONF_FILE]
file_names = ssconf.SimpleStore().GetFileList()
- file_names.append(constants.SSL_CERT_FILE)
- file_names.append(constants.RAPI_CERT_FILE)
+ file_names.extend(constants.ALL_CERT_FILES)
file_names.extend(master_files)
local_checksums = utils.FingerprintFiles(file_names)
constants.NV_VERSION: None,
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
constants.NV_NODESETUP: None,
+ constants.NV_TIME: None,
}
+
if vg_name is not None:
node_verify_param[constants.NV_VGLIST] = None
node_verify_param[constants.NV_LVLIST] = vg_name
node_verify_param[constants.NV_PVLIST] = [vg_name]
node_verify_param[constants.NV_DRBDLIST] = None
+
+ # Due to the way our RPC system works, exact response times cannot be
+ # guaranteed (e.g. a broken node could run into a timeout). By keeping the
+ # time before and after executing the request, we can at least have a time
+ # window.
+ nvinfo_starttime = time.time()
all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
self.cfg.GetClusterName())
+ nvinfo_endtime = time.time()
cluster = self.cfg.GetClusterInfo()
master_node = self.cfg.GetMasterNode()
else:
instance = instanceinfo[instance]
node_drbd[minor] = (instance.name, instance.admin_up)
+
self._VerifyNode(node_i, file_names, local_checksums,
nresult, master_files, node_drbd, vg_name)
idata = nresult.get(constants.NV_INSTANCELIST, None)
test = not isinstance(idata, list)
_ErrorIf(test, self.ENODEHV, node,
- "rpc call to node failed (instancelist)")
+ "rpc call to node failed (instancelist): %s",
+ utils.SafeEncode(str(idata)))
if test:
continue
if test:
continue
+ # Node time
+ ntime = nresult.get(constants.NV_TIME, None)
+ try:
+ ntime_merged = utils.MergeTime(ntime)
+ except (ValueError, TypeError):
+      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
+      continue
+
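+    # flag the node only if its reported time falls outside the RPC time
+    # window (see nvinfo_starttime/nvinfo_endtime above), widened by the
+    # allowed clock skew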
+ if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
+ ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
+ elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
+ ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
+ else:
+ ntime_diff = None
+
+ _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
+ "Node time diverges by at least %s from master node time",
+ ntime_diff)
+
+ if ntime_diff is not None:
+ continue
+
try:
node_info[node] = {
"mfree": int(nodeinfo['memory_free']),
_ErrorIf(snode not in node_info and snode not in n_offline,
self.ENODERPC, snode,
"instance %s, connection to secondary node"
- "failed", instance)
+ " failed", instance)
if snode in node_info:
node_info[snode]['sinst'].append(instance)
# warn that the instance lives on offline nodes
_ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
"instance lives on offline node(s) %s",
- ", ".join(inst_nodes_offline))
+ utils.CommaJoin(inst_nodes_offline))
feedback_fn("* Verifying orphan volumes")
self._VerifyOrphanVolumes(node_vol_should, node_volume)
assert hooks_results, "invalid result from hooks"
for node_name in hooks_results:
- show_node_header = True
res = hooks_results[node_name]
msg = res.fail_msg
test = msg and not res.offline
self._ErrorIf(test, self.ENODEHOOKS, node_name,
"Communication failure in hooks execution: %s", msg)
- if test:
+ if res.offline or msg:
+ # No need to investigate payload if node is offline or gave an error.
# override manually lu_result here as _ErrorIf only
# overrides self.bad
lu_result = 1
if test:
output = indent_re.sub(' ', output)
feedback_fn("%s" % output)
- lu_result = 1
+ lu_result = 0
return lu_result
continue
lvs = node_res.payload
- for lv_name, (_, lv_inactive, lv_online) in lvs.items():
+ for lv_name, (_, _, lv_online) in lvs.items():
inst = nv_dict.pop((node, lv_name), None)
if (not lv_online and inst is not None
and inst.name not in res_instances):
if self.op.instances:
self.wanted_names = []
for name in self.op.instances:
- full_name = self.cfg.ExpandInstanceName(name)
- if full_name is None:
- raise errors.OpPrereqError("Instance '%s' not known" % name,
- errors.ECODE_NOENT)
+ full_name = _ExpandInstanceName(self.cfg, name)
self.wanted_names.append(full_name)
self.needed_locks = {
locking.LEVEL_NODE: [],
"NEW_NAME": self.op.name,
}
mn = self.cfg.GetMasterNode()
- return env, [mn], [mn]
+ all_nodes = self.cfg.GetNodeList()
+ return env, [mn], all_nodes
def CheckPrereq(self):
"""Verify that the passed name is a valid one.
self.new_nicparams = objects.FillDict(
cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
objects.NIC.CheckParameterSyntax(self.new_nicparams)
+ nic_errors = []
+
+ # check all instances for consistency
+ for instance in self.cfg.GetAllInstancesInfo().values():
+ for nic_idx, nic in enumerate(instance.nics):
+ params_copy = copy.deepcopy(nic.nicparams)
+ params_filled = objects.FillDict(self.new_nicparams, params_copy)
+
+ # check parameter syntax
+ try:
+ objects.NIC.CheckParameterSyntax(params_filled)
+ except errors.ConfigurationError, err:
+ nic_errors.append("Instance %s, nic/%d: %s" %
+ (instance.name, nic_idx, err))
+
+ # if we're moving instances to routed, check that they have an ip
+ target_mode = params_filled[constants.NIC_MODE]
+ if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
+ nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
+ (instance.name, nic_idx))
+ if nic_errors:
+ raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
+ "\n".join(nic_errors))
# hypervisor list/parameters
self.new_hvparams = objects.FillDict(cluster.hvparams, {})
else:
self.new_hvparams[hv_name].update(hv_dict)
+ # os hypervisor parameters
+ self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
+ if self.op.os_hvp:
+ if not isinstance(self.op.os_hvp, dict):
+ raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
+ errors.ECODE_INVAL)
+ for os_name, hvs in self.op.os_hvp.items():
+ if not isinstance(hvs, dict):
+ raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
+ " input"), errors.ECODE_INVAL)
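+        # a previously unknown OS gets the new hypervisor dicts wholesale,
+        # while for a known OS the new values are merged into the existing
+        # per-hypervisor dicts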
+ if os_name not in self.new_os_hvp:
+ self.new_os_hvp[os_name] = hvs
+ else:
+ for hv_name, hv_dict in hvs.items():
+ if hv_name not in self.new_os_hvp[os_name]:
+ self.new_os_hvp[os_name][hv_name] = hv_dict
+ else:
+ self.new_os_hvp[os_name][hv_name].update(hv_dict)
+
if self.op.enabled_hypervisors is not None:
self.hv_list = self.op.enabled_hypervisors
if not self.hv_list:
invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
if invalid_hvs:
raise errors.OpPrereqError("Enabled hypervisors contains invalid"
- " entries: %s" % " ,".join(invalid_hvs),
+ " entries: %s" %
+ utils.CommaJoin(invalid_hvs),
errors.ECODE_INVAL)
else:
self.hv_list = cluster.enabled_hypervisors
hv_class.CheckParameterSyntax(hv_params)
_CheckHVParams(self, node_list, hv_name, hv_params)
+ if self.op.os_hvp:
+ # no need to check any newly-enabled hypervisors, since the
+ # defaults have already been checked in the above code-block
+ for os_name, os_hvp in self.new_os_hvp.items():
+ for hv_name, hv_params in os_hvp.items():
+ utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+          # we need to fill in the new os_hvp on top of the actual hv_params
+ cluster_defaults = self.new_hvparams.get(hv_name, {})
+ new_osp = objects.FillDict(cluster_defaults, hv_params)
+ hv_class = hypervisor.GetHypervisor(hv_name)
+ hv_class.CheckParameterSyntax(new_osp)
+ _CheckHVParams(self, node_list, hv_name, new_osp)
+
def Exec(self, feedback_fn):
"""Change the parameters of the cluster.
" state, not changing")
if self.op.hvparams:
self.cluster.hvparams = self.new_hvparams
+ if self.op.os_hvp:
+ self.cluster.os_hvp = self.new_os_hvp
if self.op.enabled_hypervisors is not None:
self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
if self.op.beparams:
"""
# 1. Gather target nodes
myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
- dist_nodes = lu.cfg.GetNodeList()
+ dist_nodes = lu.cfg.GetOnlineNodeList()
if additional_nodes is not None:
dist_nodes.extend(additional_nodes)
if myself.name in dist_nodes:
constants.SSH_KNOWN_HOSTS_FILE,
constants.RAPI_CERT_FILE,
constants.RAPI_USERS_FILE,
- constants.HMAC_CLUSTER_KEY,
+ constants.CONFD_HMAC_KEY,
])
enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
_RedistributeAncillaryFiles(self)
-def _WaitForSync(lu, instance, oneshot=False, unlock=False):
+def _WaitForSync(lu, instance, oneshot=False):
"""Sleep and poll for an instance's disk to sync.
"""
"""
@staticmethod
- def _DiagnoseByOS(node_list, rlist):
+ def _DiagnoseByOS(rlist):
"""Remaps a per-node return list into an a per-os per-node dictionary
- @param node_list: a list with the names of all nodes
@param rlist: a map with node names as keys and OS objects as values
@rtype: dict
"""
valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
node_data = self.rpc.call_os_diagnose(valid_nodes)
- pol = self._DiagnoseByOS(valid_nodes, node_data)
+ pol = self._DiagnoseByOS(node_data)
output = []
calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
calc_variants = "variants" in self.op.output_fields
"NODE_NAME": self.op.node_name,
}
all_nodes = self.cfg.GetNodeList()
- if self.op.node_name in all_nodes:
+ try:
all_nodes.remove(self.op.node_name)
+ except ValueError:
+ logging.warning("Node %s which is about to be removed not found"
+ " in the all nodes list", self.op.node_name)
return env, all_nodes, all_nodes
def CheckPrereq(self):
Any errors are signaled by raising errors.OpPrereqError.
"""
- node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
- if node is None:
- raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name,
- errors.ECODE_NOENT)
+ self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ node = self.cfg.GetNodeInfo(self.op.node_name)
+ assert node is not None
instance_list = self.cfg.GetInstanceList()
# Run post hooks on the node before it's removed
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
try:
- h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
+ hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
except:
+ # pylint: disable-msg=W0702
self.LogWarning("Errors occurred running hooks on %s" % node.name)
result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
"""Logical unit for querying nodes.
"""
+ # pylint: disable-msg=W0142
_OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
inst_fields = frozenset(("pinst_cnt", "pinst_list",
"sinst_cnt", "sinst_list"))
if inst_fields & frozenset(self.op.output_fields):
- instancelist = self.cfg.GetInstanceList()
+ inst_data = self.cfg.GetAllInstancesInfo()
- for instance_name in instancelist:
- inst = self.cfg.GetInstanceInfo(instance_name)
+ for inst in inst_data.values():
if inst.primary_node in node_to_primary:
node_to_primary[inst.primary_node].add(inst.name)
for secnode in inst.secondary_nodes:
REQ_BGL = False
def CheckArguments(self):
- node_name = self.cfg.ExpandNodeName(self.op.node_name)
- if node_name is None:
- raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
- errors.ECODE_NOENT)
-
- self.op.node_name = node_name
+    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
storage_type = self.op.storage_type
if storage_type not in constants.VALID_STORAGE_TYPES:
HTYPE = constants.HTYPE_NODE
_OP_REQP = ["node_name"]
+ def CheckArguments(self):
+ # validate/normalize the node name
+ self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
+
def BuildHooksEnv(self):
"""Build hooks env.
# later in the procedure; this also means that if the re-add
# fails, we are left with a non-offlined, broken node
if self.op.readd:
- new_node.drained = new_node.offline = False
+ new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
self.LogInfo("Readding a node, the offline/drained flags were reset")
# if we demote the node, we do cleanup later in the procedure
new_node.master_candidate = self.master_candidate
REQ_BGL = False
def CheckArguments(self):
- node_name = self.cfg.ExpandNodeName(self.op.node_name)
- if node_name is None:
- raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
- errors.ECODE_INVAL)
- self.op.node_name = node_name
+ self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
_CheckBooleanOpField(self.op, 'master_candidate')
_CheckBooleanOpField(self.op, 'offline')
_CheckBooleanOpField(self.op, 'drained')
+ _CheckBooleanOpField(self.op, 'auto_promote')
all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
if all_mods.count(None) == 3:
raise errors.OpPrereqError("Please pass at least one modification",
" state at the same time",
errors.ECODE_INVAL)
+ # Boolean value that tells us whether we're offlining or draining the node
+ self.offline_or_drain = (self.op.offline == True or
+ self.op.drained == True)
+ self.deoffline_or_drain = (self.op.offline == False or
+ self.op.drained == False)
+ self.might_demote = (self.op.master_candidate == False or
+ self.offline_or_drain)
+
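+    # demoting this node from master candidate may require promoting another
+    # one, and auto-promotion needs to inspect (and lock) all nodes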
+ self.lock_all = self.op.auto_promote and self.might_demote
+
def ExpandNames(self):
- self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
+ if self.lock_all:
+ self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
+ else:
+ self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
def BuildHooksEnv(self):
"""Build hooks env.
" only via masterfailover",
errors.ECODE_INVAL)
- # Boolean value that tells us whether we're offlining or draining the node
- offline_or_drain = self.op.offline == True or self.op.drained == True
- deoffline_or_drain = self.op.offline == False or self.op.drained == False
-
- if (node.master_candidate and
- (self.op.master_candidate == False or offline_or_drain)):
- cp_size = self.cfg.GetClusterInfo().candidate_pool_size
- mc_now, mc_should, mc_max = self.cfg.GetMasterCandidateStats()
- if mc_now <= cp_size:
- msg = ("Not enough master candidates (desired"
- " %d, new value will be %d)" % (cp_size, mc_now-1))
- # Only allow forcing the operation if it's an offline/drain operation,
- # and we could not possibly promote more nodes.
- # FIXME: this can still lead to issues if in any way another node which
- # could be promoted appears in the meantime.
- if self.op.force and offline_or_drain and mc_should == mc_max:
- self.LogWarning(msg)
- else:
- raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+ if node.master_candidate and self.might_demote and not self.lock_all:
+ assert not self.op.auto_promote, "auto-promote set but lock_all not"
+ # check if after removing the current node, we're missing master
+ # candidates
+ (mc_remaining, mc_should, _) = \
+ self.cfg.GetMasterCandidateStats(exceptions=[node.name])
+ if mc_remaining < mc_should:
+ raise errors.OpPrereqError("Not enough master candidates, please"
+ " pass auto_promote to allow promotion",
+ errors.ECODE_INVAL)
if (self.op.master_candidate == True and
((node.offline and not self.op.offline == False) or
errors.ECODE_INVAL)
# If we're being deofflined/drained, we'll MC ourself if needed
- if (deoffline_or_drain and not offline_or_drain and not
- self.op.master_candidate == True):
+ if (self.deoffline_or_drain and not self.offline_or_drain and not
+ self.op.master_candidate == True and not node.master_candidate):
self.op.master_candidate = _DecideSelfPromotion(self)
if self.op.master_candidate:
self.LogInfo("Autopromoting node to master candidate")
node.offline = False
result.append(("offline", "clear offline status due to drain"))
+    # we locked all nodes, so we adjust the candidate pool before updating
+    # this node
+ if self.lock_all:
+ _AdjustCandidatePool(self, [node.name])
+
# this will trigger configuration file update, if needed
self.cfg.Update(node, feedback_fn)
+
# this will trigger job queue propagation or cleanup
if changed_mc:
self.context.ReaddNode(node)
REQ_BGL = False
def CheckArguments(self):
- node_name = self.cfg.ExpandNodeName(self.op.node_name)
- if node_name is None:
- raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
- errors.ECODE_NOENT)
- self.op.node_name = node_name
- if node_name == self.cfg.GetMasterNode() and not self.op.force:
+ self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+ if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
raise errors.OpPrereqError("The node is the master and the force"
" parameter was not set",
errors.ECODE_INVAL)
"""
cluster = self.cfg.GetClusterInfo()
+ os_hvp = {}
+
+ # Filter just for enabled hypervisors
+ for os_name, hv_dict in cluster.os_hvp.items():
+ os_hvp[os_name] = {}
+ for hv_name, hv_params in hv_dict.items():
+ if hv_name in cluster.enabled_hypervisors:
+ os_hvp[os_name][hv_name] = hv_params
+
result = {
"software_version": constants.RELEASE_VERSION,
"protocol_version": constants.PROTOCOL_VERSION,
"enabled_hypervisors": cluster.enabled_hypervisors,
"hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
for hypervisor_name in cluster.enabled_hypervisors]),
+ "os_hvp": os_hvp,
"beparams": cluster.beparams,
"nicparams": cluster.nicparams,
"candidate_pool_size": cluster.candidate_pool_size,
elif field == "drain_flag":
entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
elif field == "watcher_pause":
- return utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
+ entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
else:
raise errors.ParameterError(field)
values.append(entry)
_ShutdownInstanceDisks.
"""
- pnode = instance.primary_node
- ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
- ins_l.Raise("Can't contact node %s" % pnode)
-
- if instance.name in ins_l.payload:
- raise errors.OpExecError("Instance is running, can't shutdown"
- " block devices.")
-
+ _CheckInstanceDown(lu, instance, "cannot shutdown disks")
_ShutdownInstanceDisks(lu, instance)
errors.ECODE_NORES)
+def _CheckNodesFreeDisk(lu, nodenames, requested):
+ """Checks if nodes have enough free disk space in the default VG.
+
+  This function checks if all given nodes have the needed amount of
+  free disk. In case any node has less disk or we cannot get the
+  information from the node, this function raises an OpPrereqError
+  exception.
+
+  @type lu: C{LogicalUnit}
+  @param lu: a logical unit from which we get configuration data
+  @type nodenames: C{list}
+  @param nodenames: the list of node names to check
+ @type requested: C{int}
+ @param requested: the amount of disk in MiB to check for
+ @raise errors.OpPrereqError: if the node doesn't have enough disk, or
+ we cannot check the node
+
+ """
+ nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
+ lu.cfg.GetHypervisorType())
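+  # each successful reply carries a payload dictionary; "vg_free" is
+  # expressed in MiB, like the requested amount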
+ for node in nodenames:
+ info = nodeinfo[node]
+ info.Raise("Cannot get current information from node %s" % node,
+ prereq=True, ecode=errors.ECODE_ENVIRON)
+ vg_free = info.payload.get("vg_free", None)
+ if not isinstance(vg_free, int):
+ raise errors.OpPrereqError("Can't compute free disk space on node %s,"
+ " result was '%s'" % (node, vg_free),
+ errors.ECODE_ENVIRON)
+ if requested > vg_free:
+ raise errors.OpPrereqError("Not enough disk space on target node %s:"
+ " required %d MiB, available %d MiB" %
+ (node, requested, vg_free),
+ errors.ECODE_NORES)
+
+
class LUStartupInstance(LogicalUnit):
"""Starts an instance.
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name,
errors.ECODE_INVAL)
- if instance.admin_up:
- raise errors.OpPrereqError("Instance '%s' is marked to be up" %
- self.op.instance_name,
- errors.ECODE_STATE)
- remote_info = self.rpc.call_instance_info(instance.primary_node,
- instance.name,
- instance.hypervisor)
- remote_info.Raise("Error checking node %s" % instance.primary_node,
- prereq=True, ecode=errors.ECODE_ENVIRON)
- if remote_info.payload:
- raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
- (self.op.instance_name,
- instance.primary_node),
- errors.ECODE_STATE)
+ _CheckInstanceDown(self, instance, "cannot reinstall")
self.op.os_type = getattr(self.op, "os_type", None)
self.op.force_variant = getattr(self.op, "force_variant", False)
if self.op.os_type is not None:
# OS verification
- pnode = self.cfg.GetNodeInfo(
- self.cfg.ExpandNodeName(instance.primary_node))
- if pnode is None:
- raise errors.OpPrereqError("Primary node '%s' is unknown" %
- self.op.pnode, errors.ECODE_NOENT)
- result = self.rpc.call_os_get(pnode.name, self.op.os_type)
- result.Raise("OS '%s' not in supported OS list for primary node %s" %
- (self.op.os_type, pnode.name),
- prereq=True, ecode=errors.ECODE_INVAL)
- if not self.op.force_variant:
- _CheckOSVariant(result.payload, self.op.os_type)
+ pnode = _ExpandNodeName(self.cfg, instance.primary_node)
+ _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
self.instance = instance
_StartInstanceDisks(self, inst, None)
try:
feedback_fn("Running the instance OS create scripts...")
- result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
+ # FIXME: pass debug option from opcode to backend
+ result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
+ self.op.debug_level)
result.Raise("Could not install OS for instance %s on node %s" %
(inst.name, inst.primary_node))
finally:
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name, errors.ECODE_INVAL)
- if instance.admin_up:
- raise errors.OpPrereqError("Instance '%s' is marked to be up" %
- self.op.instance_name, errors.ECODE_STATE)
- remote_info = self.rpc.call_instance_info(instance.primary_node,
- instance.name,
- instance.hypervisor)
- remote_info.Raise("Error checking node %s" % instance.primary_node,
- prereq=True, ecode=errors.ECODE_ENVIRON)
- if remote_info.payload:
- raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
- (self.op.instance_name,
- instance.primary_node), errors.ECODE_STATE)
+ _CheckInstanceDown(self, instance, "cannot recreate disks")
if not self.op.disks:
self.op.disks = range(len(instance.disks))
"""
to_skip = []
- for idx, disk in enumerate(self.instance.disks):
+ for idx, _ in enumerate(self.instance.disks):
if idx not in self.op.disks: # disk idx has not been passed in
to_skip.append(idx)
continue
This checks that the instance is in the cluster and is not running.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.op.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.op.instance_name, errors.ECODE_NOENT)
+ self.op.instance_name = _ExpandInstanceName(self.cfg,
+ self.op.instance_name)
+ instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert instance is not None
_CheckNodeOnline(self, instance.primary_node)
-
- if instance.admin_up:
- raise errors.OpPrereqError("Instance '%s' is marked to be up" %
- self.op.instance_name, errors.ECODE_STATE)
- remote_info = self.rpc.call_instance_info(instance.primary_node,
- instance.name,
- instance.hypervisor)
- remote_info.Raise("Error checking node %s" % instance.primary_node,
- prereq=True, ecode=errors.ECODE_ENVIRON)
- if remote_info.payload:
- raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
- (self.op.instance_name,
- instance.primary_node), errors.ECODE_STATE)
+ _CheckInstanceDown(self, instance, "cannot rename")
self.instance = instance
# new name verification
_StartInstanceDisks(self, inst, None)
try:
result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
- old_name)
+ old_name, self.op.debug_level)
msg = result.fail_msg
if msg:
msg = ("Could not run OS rename script for instance %s on node %s"
env = _BuildInstanceHookEnvByObject(self, self.instance)
env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
nl = [self.cfg.GetMasterNode()]
- return env, nl, nl
+ nl_post = list(self.instance.all_nodes) + nl
+ return env, nl, nl_post
def CheckPrereq(self):
"""Check prerequisites.
" node %s: %s" %
(instance.name, instance.primary_node, msg))
- logging.info("Removing block devices for instance %s", instance.name)
+ _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
- if not _RemoveDisks(self, instance):
- if self.op.ignore_failures:
- feedback_fn("Warning: can't remove instance's disks")
- else:
- raise errors.OpExecError("Can't remove instance's disks")
- logging.info("Removing instance %s out of cluster config", instance.name)
+def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
+ """Utility function to remove an instance.
+
+ """
+ logging.info("Removing block devices for instance %s", instance.name)
- self.cfg.RemoveInstance(instance.name)
- self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
+ if not _RemoveDisks(lu, instance):
+ if not ignore_failures:
+ raise errors.OpExecError("Can't remove instance's disks")
+ feedback_fn("Warning: can't remove instance's disks")
+
+ logging.info("Removing instance %s out of cluster config", instance.name)
+
+ lu.cfg.RemoveInstance(instance.name)
+
+ assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
+ "Instance lock removal conflict"
+
+ # Remove lock for the instance
+ lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
class LUQueryInstances(NoHooksLU):
"""Logical unit for querying instances.
"""
+ # pylint: disable-msg=W0142
_OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
_SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
"""Computes the list of nodes and their attributes.
"""
+ # pylint: disable-msg=R0912
+ # way too many branches here
all_info = self.cfg.GetAllInstancesInfo()
if self.wanted == locking.ALL_SET:
# caller didn't specify instance names, so ordering is not important
This runs on master, primary and secondary nodes of the instance.
"""
+ instance = self.instance
+ source_node = instance.primary_node
+ target_node = instance.secondary_nodes[0]
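+    # a failover swaps the roles of the two nodes, so export both the old
+    # and the new primary/secondary pairs to the hooks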
env = {
"IGNORE_CONSISTENCY": self.op.ignore_consistency,
"SHUTDOWN_TIMEOUT": self.shutdown_timeout,
+ "OLD_PRIMARY": source_node,
+ "OLD_SECONDARY": target_node,
+ "NEW_PRIMARY": target_node,
+ "NEW_SECONDARY": source_node,
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
- nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
- return env, nl, nl
+ env.update(_BuildInstanceHookEnvByObject(self, instance))
+ nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
+ nl_post = list(nl)
+ nl_post.append(source_node)
+ return env, nl, nl_post
def CheckPrereq(self):
"""Check prerequisites.
"""
instance = self._migrater.instance
+ source_node = instance.primary_node
+ target_node = instance.secondary_nodes[0]
env = _BuildInstanceHookEnvByObject(self, instance)
env["MIGRATE_LIVE"] = self.op.live
env["MIGRATE_CLEANUP"] = self.op.cleanup
+ env.update({
+ "OLD_PRIMARY": source_node,
+ "OLD_SECONDARY": target_node,
+ "NEW_PRIMARY": target_node,
+ "NEW_SECONDARY": source_node,
+ })
nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
- return env, nl, nl
+ nl_post = list(nl)
+ nl_post.append(source_node)
+ return env, nl, nl_post
class LUMoveInstance(LogicalUnit):
def ExpandNames(self):
self._ExpandAndLockInstance()
- target_node = self.cfg.ExpandNodeName(self.op.target_node)
- if target_node is None:
- raise errors.OpPrereqError("Node '%s' not known" %
- self.op.target_node, errors.ECODE_NOENT)
+ target_node = _ExpandNodeName(self.cfg, self.op.target_node)
self.op.target_node = target_node
self.needed_locks[locking.LEVEL_NODE] = [target_node]
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
for idx, dsk in enumerate(instance.disks):
if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
raise errors.OpPrereqError("Instance disk %d has a complex layout,"
- " cannot copy", errors.ECODE_STATE)
+ " cannot copy" % idx, errors.ECODE_STATE)
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
REQ_BGL = False
def ExpandNames(self):
- self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
- if self.op.node_name is None:
- raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name,
- errors.ECODE_NOENT)
+ self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
self.needed_locks = {
locking.LEVEL_NODE: [self.op.node_name],
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(
- self.cfg.ExpandInstanceName(self.instance_name))
- if instance is None:
- raise errors.OpPrereqError("Instance '%s' not known" %
- self.instance_name, errors.ECODE_NOENT)
+ instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
+ instance = self.cfg.GetInstanceInfo(instance_name)
+ assert instance is not None
if instance.disk_template != constants.DT_DRBD8:
raise errors.OpPrereqError("Instance's disk layout is not"
"""
port = lu.cfg.AllocatePort()
vgname = lu.cfg.GetVGName()
- shared_secret = lu.cfg.GenerateDRBDSecret()
+ shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
logical_id=(vgname, names[0]))
dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
"hvparams", "beparams"]
REQ_BGL = False
- def _ExpandNode(self, node):
- """Expands and checks one node name.
+ def CheckArguments(self):
+ """Check arguments.
"""
- node_full = self.cfg.ExpandNodeName(node)
- if node_full is None:
- raise errors.OpPrereqError("Unknown node %s" % node, errors.ECODE_NOENT)
- return node_full
+    # set optional parameters to None if they don't exist
+ for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
+ if not hasattr(self.op, attr):
+ setattr(self.op, attr, None)
+
+ # do not require name_check to ease forward/backward compatibility
+ # for tools
+ if not hasattr(self.op, "name_check"):
+ self.op.name_check = True
+ if not hasattr(self.op, "no_install"):
+ self.op.no_install = False
+ if self.op.no_install and self.op.start:
+ self.LogInfo("No-installation mode selected, disabling startup")
+ self.op.start = False
+ # validate/normalize the instance name
+ self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
+ if self.op.ip_check and not self.op.name_check:
+ # TODO: make the ip check more flexible and not depend on the name check
+ raise errors.OpPrereqError("Cannot do ip checks without a name check",
+ errors.ECODE_INVAL)
+ if (self.op.disk_template == constants.DT_FILE and
+ not constants.ENABLE_FILE_STORAGE):
+ raise errors.OpPrereqError("File storage disabled at configure time",
+ errors.ECODE_INVAL)
+ # check disk information: either all adopt, or no adopt
+ has_adopt = has_no_adopt = False
+ for disk in self.op.disks:
+ if "adopt" in disk:
+ has_adopt = True
+ else:
+ has_no_adopt = True
+ if has_adopt and has_no_adopt:
+ raise errors.OpPrereqError("Either all disks have are adoped or none is",
+ errors.ECODE_INVAL)
+ if has_adopt:
+ if self.op.disk_template != constants.DT_PLAIN:
+ raise errors.OpPrereqError("Disk adoption is only supported for the"
+ " 'plain' disk template",
+ errors.ECODE_INVAL)
+ if self.op.iallocator is not None:
+ raise errors.OpPrereqError("Disk adoption not allowed with an"
+ " iallocator script", errors.ECODE_INVAL)
+ if self.op.mode == constants.INSTANCE_IMPORT:
+ raise errors.OpPrereqError("Disk adoption not allowed for"
+ " instance import", errors.ECODE_INVAL)
+
+ self.adopt_disks = has_adopt
def ExpandNames(self):
"""ExpandNames for CreateInstance.
"""
self.needed_locks = {}
- # set optional parameters to none if they don't exist
- for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
- if not hasattr(self.op, attr):
- setattr(self.op, attr, None)
-
# cheap checks, mostly valid constants given
# verify creation mode
self.op.mode, errors.ECODE_INVAL)
# disk template and mirror node verification
- if self.op.disk_template not in constants.DISK_TEMPLATES:
- raise errors.OpPrereqError("Invalid disk template name",
- errors.ECODE_INVAL)
+ _CheckDiskTemplate(self.op.disk_template)
if self.op.hypervisor is None:
self.op.hypervisor = self.cfg.GetHypervisorType()
#### instance parameters check
# instance name verification
- hostname1 = utils.GetHostInfo(self.op.instance_name)
- self.op.instance_name = instance_name = hostname1.name
+ if self.op.name_check:
+ hostname1 = utils.GetHostInfo(self.op.instance_name)
+ self.op.instance_name = instance_name = hostname1.name
+ # used in CheckPrereq for ip ping check
+ self.check_ip = hostname1.ip
+ else:
+ instance_name = self.op.instance_name
+ self.check_ip = None
# this is just a preventive check, but someone might still add this
# instance in the meantime, and creation will fail at lock-add time
if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
elif ip.lower() == constants.VALUE_AUTO:
+ if not self.op.name_check:
+ raise errors.OpPrereqError("IP address set to auto but name checks"
+ " have been skipped. Aborting.",
+ errors.ECODE_INVAL)
nic_ip = hostname1.ip
else:
if not utils.IsValidIP(ip):
errors.ECODE_INVAL)
nic_ip = ip
- # TODO: check the ip for uniqueness !!
+ # TODO: check the ip address for uniqueness
if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
raise errors.OpPrereqError("Routed nic mode requires an ip address",
errors.ECODE_INVAL)
# MAC address verification
mac = nic.get("mac", constants.VALUE_AUTO)
if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- if not utils.IsValidMac(mac.lower()):
- raise errors.OpPrereqError("Invalid MAC address specified: %s" %
- mac, errors.ECODE_INVAL)
- else:
- try:
- self.cfg.ReserveMAC(mac, self.proc.GetECId())
- except errors.ReservationError:
- raise errors.OpPrereqError("MAC address %s already in use"
- " in cluster" % mac,
- errors.ECODE_NOTUNIQUE)
+ mac = utils.NormalizeAndValidateMac(mac)
+
+ try:
+ self.cfg.ReserveMAC(mac, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("MAC address %s already in use"
+ " in cluster" % mac,
+ errors.ECODE_NOTUNIQUE)
# bridge verification
bridge = nic.get("bridge", None)
raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
try:
size = int(size)
- except ValueError:
+ except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
- self.disks.append({"size": size, "mode": mode})
-
- # used in CheckPrereq for ip ping check
- self.check_ip = hostname1.ip
+ new_disk = {"size": size, "mode": mode}
+ if "adopt" in disk:
+ new_disk["adopt"] = disk["adopt"]
+ self.disks.append(new_disk)
# file storage checks
if (self.op.file_driver and
if self.op.iallocator:
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
else:
- self.op.pnode = self._ExpandNode(self.op.pnode)
+ self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
nodelist = [self.op.pnode]
if self.op.snode is not None:
- self.op.snode = self._ExpandNode(self.op.snode)
+ self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
nodelist.append(self.op.snode)
self.needed_locks[locking.LEVEL_NODE] = nodelist
" path requires a source node option.",
errors.ECODE_INVAL)
else:
- self.op.src_node = src_node = self._ExpandNode(src_node)
+ self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
self.needed_locks[locking.LEVEL_NODE].append(src_node)
if not os.path.isabs(src_path):
self.op.src_path = src_path = \
- os.path.join(constants.EXPORT_DIR, src_path)
+ utils.PathJoin(constants.EXPORT_DIR, src_path)
# On import force_variant must be True, because if we forced it at
# initial install, our only chance when importing it back is that it
# works again!
self.op.force_variant = True
+ if self.op.no_install:
+ self.LogInfo("No-installation mode has no effect during import")
+
else: # INSTANCE_CREATE
if getattr(self.op, "os_type", None) is None:
raise errors.OpPrereqError("No guest OS specified",
" iallocator '%s': %s" %
(self.op.iallocator, ial.info),
errors.ECODE_NORES)
- if len(ial.nodes) != ial.required_nodes:
+ if len(ial.result) != ial.required_nodes:
raise errors.OpPrereqError("iallocator '%s' returned invalid number"
" of nodes (%s), required %s" %
- (self.op.iallocator, len(ial.nodes),
+ (self.op.iallocator, len(ial.result),
ial.required_nodes), errors.ECODE_FAULT)
- self.op.pnode = ial.nodes[0]
+ self.op.pnode = ial.result[0]
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
- ", ".join(ial.nodes))
+ utils.CommaJoin(ial.result))
if ial.required_nodes == 2:
- self.op.snode = ial.nodes[1]
+ self.op.snode = ial.result[1]
def BuildHooksEnv(self):
"""Build hooks env.
self.secondaries)
return env, nl, nl
-
def CheckPrereq(self):
"""Check prerequisites.
if src_path in exp_list[node].payload:
found = True
self.op.src_node = src_node = node
- self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
- src_path)
+ self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
+ src_path)
break
if not found:
raise errors.OpPrereqError("No export found for relative path %s" %
if export_info.has_option(constants.INISECT_INS, option):
# FIXME: are the old os-es, disk sizes, etc. useful?
export_name = export_info.get(constants.INISECT_INS, option)
- image = os.path.join(src_path, export_name)
+ image = utils.PathJoin(src_path, export_name)
disk_images.append(image)
else:
disk_images.append(False)
nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
# ENDIF: self.op.mode == constants.INSTANCE_IMPORT
- # ip ping checks (we use the same ip that was resolved in ExpandNames)
- if self.op.start and not self.op.ip_check:
- raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
- " adding an instance in start mode",
- errors.ECODE_INVAL)
+ # ip ping checks (we use the same ip that was resolved in ExpandNames)
if self.op.ip_check:
if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("IP %s of instance %s already in use" %
req_size = _ComputeDiskSize(self.op.disk_template,
self.disks)
- # Check lv size requirements
- if req_size is not None:
- nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
- self.op.hypervisor)
- for node in nodenames:
- info = nodeinfo[node]
- info.Raise("Cannot get current information from node %s" % node)
- info = info.payload
- vg_free = info.get('vg_free', None)
- if not isinstance(vg_free, int):
- raise errors.OpPrereqError("Can't compute free disk space on"
- " node %s" % node, errors.ECODE_ENVIRON)
- if req_size > vg_free:
- raise errors.OpPrereqError("Not enough disk space on target node %s."
- " %d MB available, %d MB required" %
- (node, vg_free, req_size),
- errors.ECODE_NORES)
+ # Check lv size requirements, if not adopting
+ if req_size is not None and not self.adopt_disks:
+ _CheckNodesFreeDisk(self, nodenames, req_size)
+
+ if self.adopt_disks: # instead, we must check the adoption data
+ all_lvs = set([i["adopt"] for i in self.disks])
+ if len(all_lvs) != len(self.disks):
+ raise errors.OpPrereqError("Duplicate volume names given for adoption",
+ errors.ECODE_INVAL)
+ for lv_name in all_lvs:
+ try:
+ self.cfg.ReserveLV(lv_name, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("LV named %s used by another instance" %
+ lv_name, errors.ECODE_NOTUNIQUE)
+
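+      # the payload maps each LV name to a tuple whose first field is the
+      # size and whose third field tells whether the LV is online (in use),
+      # as consumed below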
+ node_lvs = self.rpc.call_lv_list([pnode.name],
+ self.cfg.GetVGName())[pnode.name]
+ node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
+ node_lvs = node_lvs.payload
+ delta = all_lvs.difference(node_lvs.keys())
+ if delta:
+ raise errors.OpPrereqError("Missing logical volume(s): %s" %
+ utils.CommaJoin(delta),
+ errors.ECODE_INVAL)
+ online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
+ if online_lvs:
+ raise errors.OpPrereqError("Online logical volumes found, cannot"
+ " adopt: %s" % utils.CommaJoin(online_lvs),
+ errors.ECODE_STATE)
+ # update the size of disk based on what is found
+ for dsk in self.disks:
+ dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
_CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
- # os verification
- result = self.rpc.call_os_get(pnode.name, self.op.os_type)
- result.Raise("OS '%s' not in supported os list for primary node %s" %
- (self.op.os_type, pnode.name),
- prereq=True, ecode=errors.ECODE_INVAL)
- if not self.op.force_variant:
- _CheckOSVariant(result.payload, self.op.os_type)
+ _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
_CheckNicsBridgesExist(self, self.nics, self.pnode.name)
else:
network_port = None
- ##if self.op.vnc_bind_address is None:
- ## self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
-
# this is needed because os.path.join does not accept None arguments
if self.op.file_storage_dir is None:
string_file_storage_dir = ""
string_file_storage_dir = self.op.file_storage_dir
# build the full file storage dir path
- file_storage_dir = os.path.normpath(os.path.join(
- self.cfg.GetFileStorageDir(),
- string_file_storage_dir, instance))
-
+ file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
+ string_file_storage_dir, instance)
disks = _GenerateDiskTemplate(self,
self.op.disk_template,
hypervisor=self.op.hypervisor,
)
- feedback_fn("* creating instance disks...")
- try:
- _CreateDisks(self, iobj)
- except errors.OpExecError:
- self.LogWarning("Device creation failed, reverting...")
+ if self.adopt_disks:
+ # rename LVs to the newly-generated names; we need to construct
+ # 'fake' LV disks with the old data, plus the new unique_id
+ tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
+ rename_to = []
+      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
+ rename_to.append(t_dsk.logical_id)
+ t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
+ self.cfg.SetDiskID(t_dsk, pnode_name)
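+      # a single RPC renames all volumes on the primary node from the
+      # adopted names to the newly-generated ones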
+ result = self.rpc.call_blockdev_rename(pnode_name,
+ zip(tmp_disks, rename_to))
+ result.Raise("Failed to rename adoped LVs")
+ else:
+ feedback_fn("* creating instance disks...")
try:
- _RemoveDisks(self, iobj)
- finally:
- self.cfg.ReleaseDRBDMinors(instance)
- raise
+ _CreateDisks(self, iobj)
+ except errors.OpExecError:
+ self.LogWarning("Device creation failed, reverting...")
+ try:
+ _RemoveDisks(self, iobj)
+ finally:
+ self.cfg.ReleaseDRBDMinors(instance)
+ raise
feedback_fn("adding instance %s to cluster config" % instance)
raise errors.OpExecError("There are some degraded disks for"
" this instance")
- feedback_fn("creating os for instance %s on node %s" %
- (instance, pnode_name))
-
- if iobj.disk_template != constants.DT_DISKLESS:
+ if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
if self.op.mode == constants.INSTANCE_CREATE:
- feedback_fn("* running the instance OS create scripts...")
- result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
- result.Raise("Could not add os for instance %s"
- " on node %s" % (instance, pnode_name))
+ if not self.op.no_install:
+ feedback_fn("* running the instance OS create scripts...")
+ # FIXME: pass debug option from opcode to backend
+ result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
+ self.op.debug_level)
+ result.Raise("Could not add os for instance %s"
+ " on node %s" % (instance, pnode_name))
elif self.op.mode == constants.INSTANCE_IMPORT:
feedback_fn("* running the instance OS import scripts...")
src_node = self.op.src_node
src_images = self.src_images
cluster_name = self.cfg.GetClusterName()
+ # FIXME: pass debug option from opcode to backend
import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
src_node, src_images,
- cluster_name)
+ cluster_name,
+ self.op.debug_level)
msg = import_result.fail_msg
if msg:
self.LogWarning("Error while importing the disk images for instance"
self.op.remote_node = None
if not hasattr(self.op, "iallocator"):
self.op.iallocator = None
+ if not hasattr(self.op, "early_release"):
+ self.op.early_release = False
TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
self.op.iallocator)
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
elif self.op.remote_node is not None:
- remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
- if remote_node is None:
- raise errors.OpPrereqError("Node '%s' not known" %
- self.op.remote_node, errors.ECODE_NOENT)
-
+ remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
self.op.remote_node = remote_node
# Warning: do not remove the locking of the new secondary here
self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node,
- self.op.disks)
+ self.op.disks, False, self.op.early_release)
self.tasklets = [self.replacer]
self.op.remote_node = None
if not hasattr(self.op, "iallocator"):
self.op.iallocator = None
+ if not hasattr(self.op, "early_release"):
+ self.op.early_release = False
TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
self.op.remote_node,
self.op.iallocator)
def ExpandNames(self):
- self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
- if self.op.node_name is None:
- raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name,
- errors.ECODE_NOENT)
+ self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
self.needed_locks = {}
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
elif self.op.remote_node is not None:
- remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
- if remote_node is None:
- raise errors.OpPrereqError("Node '%s' not known" %
- self.op.remote_node, errors.ECODE_NOENT)
-
- self.op.remote_node = remote_node
+ self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
# Warning: do not remove the locking of the new secondary here
# unless DRBD8.AddChildren is changed to work in parallel;
# currently it doesn't since parallel invocations of
# FindUnusedMinor will conflict
- self.needed_locks[locking.LEVEL_NODE] = [remote_node]
+ self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
else:
names.append(inst.name)
replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
- self.op.iallocator, self.op.remote_node, [])
+ self.op.iallocator, self.op.remote_node, [],
+ True, self.op.early_release)
tasklets.append(replacer)
self.tasklets = tasklets
"""
def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
- disks):
+ disks, delay_iallocator, early_release):
"""Initializes this class.
"""
self.iallocator_name = iallocator_name
self.remote_node = remote_node
self.disks = disks
+ self.delay_iallocator = delay_iallocator
+ self.early_release = early_release
# Runtime data
self.instance = None
" %s" % (iallocator_name, ial.info),
errors.ECODE_NORES)
- if len(ial.nodes) != ial.required_nodes:
+ if len(ial.result) != ial.required_nodes:
raise errors.OpPrereqError("iallocator '%s' returned invalid number"
" of nodes (%s), required %s" %
- (len(ial.nodes), ial.required_nodes),
+ (iallocator_name,
+ len(ial.result), ial.required_nodes),
errors.ECODE_FAULT)
- remote_node_name = ial.nodes[0]
+ remote_node_name = ial.result[0]
lu.LogInfo("Selected new secondary for instance '%s': %s",
instance_name, remote_node_name)
len(instance.secondary_nodes),
errors.ECODE_FAULT)
+ if not self.delay_iallocator:
+ self._CheckPrereq2()
+
+ def _CheckPrereq2(self):
+ """Check prerequisites, second part.
+
+    This function should always be part of CheckPrereq. It was separated and
+    is now called from Exec because, during node evacuation, the iallocator
+    would otherwise only be run against an unmodified cluster model, not
+    taking planned changes into account.
+
+ """
+ instance = self.instance
secondary_node = instance.secondary_nodes[0]
if self.iallocator_name is None:
_CheckNodeNotDrained(self.lu, remote_node)
+ old_node_info = self.cfg.GetNodeInfo(secondary_node)
+ assert old_node_info is not None
+ if old_node_info.offline and not self.early_release:
+ # doesn't make sense to delay the release
+ self.early_release = True
+ self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
+ " early-release mode", secondary_node)
+
else:
raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
self.mode)
This dispatches the disk replacement to the appropriate handler.
"""
+ if self.delay_iallocator:
+ self._CheckPrereq2()
+
if not self.disks:
feedback_fn("No disks need replacement")
return
feedback_fn("Replacing disk(s) %s for %s" %
- (", ".join([str(i) for i in self.disks]), self.instance.name))
+ (utils.CommaJoin(self.disks), self.instance.name))
activate_disks = (not self.instance.admin_up)
return iv_names
def _CheckDevices(self, node_name, iv_names):
- for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+ for name, (dev, _, _) in iv_names.iteritems():
self.cfg.SetDiskID(dev, node_name)
result = self.rpc.call_blockdev_find(node_name, dev)
raise errors.OpExecError("DRBD device %s is degraded!" % name)
def _RemoveOldStorage(self, node_name, iv_names):
- for name, (dev, old_lvs, _) in iv_names.iteritems():
+ for name, (_, old_lvs, _) in iv_names.iteritems():
self.lu.LogInfo("Remove logical volumes for %s" % name)
for lv in old_lvs:
self.lu.LogWarning("Can't remove old LV: %s" % msg,
hint="remove unused LVs manually")
+  def _ReleaseNodeLock(self, node_names):
+    """Releases the lock(s) for the given node name or list of names."""
+    self.lu.context.glm.release(locking.LEVEL_NODE, node_names)
+
def _ExecDrbd8DiskOnly(self, feedback_fn):
"""Replace a disk on the primary or secondary for DRBD 8.
self.cfg.Update(self.instance, feedback_fn)
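+    # with early_release the old storage goes away first and the node locks
+    # are dropped before the sync; otherwise we sync first and remove the
+    # old storage last -- cstep keeps the step numbers right in both orders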
+ cstep = 5
+ if self.early_release:
+ self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ cstep += 1
+ self._RemoveOldStorage(self.target_node, iv_names)
+ # WARNING: we release both node locks here, do not do other RPCs
+ # than WaitForSync to the primary node
+ self._ReleaseNodeLock([self.target_node, self.other_node])
+
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
- self.lu.LogStep(5, steps_total, "Sync devices")
- _WaitForSync(self.lu, self.instance, unlock=True)
+ self.lu.LogStep(cstep, steps_total, "Sync devices")
+ cstep += 1
+ _WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
- self.lu.LogStep(6, steps_total, "Removing old storage")
- self._RemoveOldStorage(self.target_node, iv_names)
+ if not self.early_release:
+ self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ cstep += 1
+ self._RemoveOldStorage(self.target_node, iv_names)
def _ExecDrbd8Secondary(self, feedback_fn):
"""Replace the secondary node for DRBD 8.
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
+ assert self.instance.primary_node == o_node2, "Three-node instance?"
p_minor = o_minor2
new_alone_id = (self.instance.primary_node, self.new_node, None,
to_node, msg,
hint=("please do a gnt-instance info to see the"
" status of disks"))
+ cstep = 5
+ if self.early_release:
+ self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ cstep += 1
+ self._RemoveOldStorage(self.target_node, iv_names)
+ # WARNING: we release all node locks here, do not do other RPCs
+ # than WaitForSync to the primary node
+ self._ReleaseNodeLock([self.instance.primary_node,
+ self.target_node,
+ self.new_node])
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
- self.lu.LogStep(5, steps_total, "Sync devices")
- _WaitForSync(self.lu, self.instance, unlock=True)
+ self.lu.LogStep(cstep, steps_total, "Sync devices")
+ cstep += 1
+ _WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
- self.lu.LogStep(6, steps_total, "Removing old storage")
- self._RemoveOldStorage(self.target_node, iv_names)
+ if not self.early_release:
+ self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ self._RemoveOldStorage(self.target_node, iv_names)
class LURepairNodeStorage(NoHooksLU):
REQ_BGL = False
def CheckArguments(self):
- node_name = self.cfg.ExpandNodeName(self.op.node_name)
- if node_name is None:
- raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name,
- errors.ECODE_NOENT)
-
- self.op.node_name = node_name
+ self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
def ExpandNames(self):
self.needed_locks = {
(self.op.name, self.op.node_name))
+class LUNodeEvacuationStrategy(NoHooksLU):
+ """Computes the node evacuation strategy.
+
+ """
+ _OP_REQP = ["nodes"]
+ REQ_BGL = False
+
+ def CheckArguments(self):
+ if not hasattr(self.op, "remote_node"):
+ self.op.remote_node = None
+ if not hasattr(self.op, "iallocator"):
+ self.op.iallocator = None
+ if self.op.remote_node is not None and self.op.iallocator is not None:
+ raise errors.OpPrereqError("Give either the iallocator or the new"
+ " secondary, not both", errors.ECODE_INVAL)
+
+ def ExpandNames(self):
+ self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+ self.needed_locks = locks = {}
+ if self.op.remote_node is None:
+ locks[locking.LEVEL_NODE] = locking.ALL_SET
+ else:
+ self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+ locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
+
+ def CheckPrereq(self):
+ pass
+
+ def Exec(self, feedback_fn):
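+    """Computes the evacuation strategy.
+
+    @rtype: list
+    @return: a list of [instance_name, new_secondary_node] pairs when a
+        remote node is given; with an iallocator, the allocator's
+        multi-evacuate result is returned as-is
+
+    """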
+ if self.op.remote_node is not None:
+ instances = []
+ for node in self.op.nodes:
+ instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
+ result = []
+ for i in instances:
+ if i.primary_node == self.op.remote_node:
+ raise errors.OpPrereqError("Node %s is the primary node of"
+ " instance %s, cannot use it as"
+ " secondary" %
+ (self.op.remote_node, i.name),
+ errors.ECODE_INVAL)
+ result.append([i.name, self.op.remote_node])
+ else:
+ ial = IAllocator(self.cfg, self.rpc,
+ mode=constants.IALLOCATOR_MODE_MEVAC,
+ evac_nodes=self.op.nodes)
+ ial.Run(self.op.iallocator, validate=True)
+ if not ial.success:
+ raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
+ errors.ECODE_NORES)
+ result = ial.result
+ return result
+
+
class LUGrowDisk(LogicalUnit):
"""Grow a disk of an instance.
"AMOUNT": self.op.amount,
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
- nl = [
- self.cfg.GetMasterNode(),
- self.instance.primary_node,
- ]
+ nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return env, nl, nl
def CheckPrereq(self):
self.disk = instance.FindDisk(self.op.disk)
- nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
- instance.hypervisor)
- for node in nodenames:
- info = nodeinfo[node]
- info.Raise("Cannot get current information from node %s" % node)
- vg_free = info.payload.get('vg_free', None)
- if not isinstance(vg_free, int):
- raise errors.OpPrereqError("Can't compute free disk space on"
- " node %s" % node, errors.ECODE_ENVIRON)
- if self.op.amount > vg_free:
- raise errors.OpPrereqError("Not enough disk space on target node %s:"
- " %d MiB available, %d MiB required" %
- (node, vg_free, self.op.amount),
- errors.ECODE_NORES)
+ _CheckNodesFreeDisk(self, nodenames, self.op.amount)
def Exec(self, feedback_fn):
"""Execute disk grow.
self.cfg.SetDiskID(disk, node)
result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
result.Raise("Grow request failed to node %s" % node)
+
+ # TODO: Rewrite code to work properly
+ # DRBD goes into sync mode for a short amount of time after executing the
+ # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
+ # calling "resize" in sync mode fails. Sleeping for a short amount of
+ # time is a work-around.
+ time.sleep(5)
+
disk.RecordGrow(self.op.amount)
self.cfg.Update(instance, feedback_fn)
if self.op.wait_for_sync:
if self.op.instances:
self.wanted_names = []
for name in self.op.instances:
- full_name = self.cfg.ExpandInstanceName(name)
- if full_name is None:
- raise errors.OpPrereqError("Instance '%s' not known" % name,
- errors.ECODE_NOENT)
+ full_name = _ExpandInstanceName(self.cfg, name)
self.wanted_names.append(full_name)
self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
else:
self.op.beparams = {}
if not hasattr(self.op, 'hvparams'):
self.op.hvparams = {}
+ if not hasattr(self.op, "disk_template"):
+ self.op.disk_template = None
+ if not hasattr(self.op, "remote_node"):
+ self.op.remote_node = None
+ if not hasattr(self.op, "os_name"):
+ self.op.os_name = None
+ if not hasattr(self.op, "force_variant"):
+ self.op.force_variant = False
self.op.force = getattr(self.op, "force", False)
- if not (self.op.nics or self.op.disks or
- self.op.hvparams or self.op.beparams):
+ if not (self.op.nics or self.op.disks or self.op.disk_template or
+ self.op.hvparams or self.op.beparams or self.op.os_name):
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
errors.ECODE_INVAL)
try:
size = int(size)
- except ValueError, err:
+ except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk size parameter: %s" %
str(err), errors.ECODE_INVAL)
disk_dict['size'] = size
raise errors.OpPrereqError("Only one disk add or remove operation"
" supported at a time", errors.ECODE_INVAL)
+ if self.op.disks and self.op.disk_template is not None:
+ raise errors.OpPrereqError("Disk template conversion and other disk"
+ " changes not supported at the same time",
+ errors.ECODE_INVAL)
+
+ if self.op.disk_template:
+ _CheckDiskTemplate(self.op.disk_template)
+ if (self.op.disk_template in constants.DTS_NET_MIRROR and
+ self.op.remote_node is None):
+ raise errors.OpPrereqError("Changing the disk template to a mirrored"
+ " one requires specifying a secondary node",
+ errors.ECODE_INVAL)
+
# NIC validation
nic_addremove = 0
for nic_op, nic_dict in self.op.nics:
if 'mac' in nic_dict:
nic_mac = nic_dict['mac']
if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- if not utils.IsValidMac(nic_mac):
- raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac,
- errors.ECODE_INVAL)
+ nic_mac = utils.NormalizeAndValidateMac(nic_mac)
+
if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
raise errors.OpPrereqError("'auto' is not a valid MAC address when"
" modifying an existing nic",
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
+ if self.op.disk_template and self.op.remote_node:
+ self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+ self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
def BuildHooksEnv(self):
"""Build hooks env.
del args['nics'][-1]
env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
+ if self.op.disk_template:
+ env["NEW_DISK_TEMPLATE"] = self.op.disk_template
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return env, nl, nl
- def _GetUpdatedParams(self, old_params, update_dict,
+ @staticmethod
+ def _GetUpdatedParams(old_params, update_dict,
default_values, parameter_types):
"""Return the new params dict for the given params.
pnode = instance.primary_node
nodelist = list(instance.all_nodes)
+ if self.op.disk_template:
+ if instance.disk_template == self.op.disk_template:
+ raise errors.OpPrereqError("Instance already has disk template %s" %
+ instance.disk_template, errors.ECODE_INVAL)
+
+ if (instance.disk_template,
+ self.op.disk_template) not in self._DISK_CONVERSIONS:
+ raise errors.OpPrereqError("Unsupported disk template conversion from"
+ " %s to %s" % (instance.disk_template,
+ self.op.disk_template),
+ errors.ECODE_INVAL)
+ if self.op.disk_template in constants.DTS_NET_MIRROR:
+ _CheckNodeOnline(self, self.op.remote_node)
+ _CheckNodeNotDrained(self, self.op.remote_node)
+ disks = [{"size": d.size} for d in instance.disks]
+ required = _ComputeDiskSize(self.op.disk_template, disks)
+ _CheckNodesFreeDisk(self, [self.op.remote_node], required)
+ _CheckInstanceDown(self, instance, "cannot change disk template")
+
# hvparams processing
if self.op.hvparams:
i_hvdict, hv_new = self._GetUpdatedParams(
continue
if nic_op != constants.DDM_ADD:
# an existing nic
+ if not instance.nics:
+ raise errors.OpPrereqError("Invalid NIC index %s, instance has"
+ " no NICs" % nic_op,
+ errors.ECODE_INVAL)
if nic_op < 0 or nic_op >= len(instance.nics):
raise errors.OpPrereqError("Invalid NIC index %s, valid values"
" are 0 to %d" %
- (nic_op, len(instance.nics)),
+ (nic_op, len(instance.nics) - 1),
errors.ECODE_INVAL)
old_nic_params = instance.nics[nic_op].nicparams
old_nic_ip = instance.nics[nic_op].ip
raise errors.OpPrereqError("Disk operations not supported for"
" diskless instances",
errors.ECODE_INVAL)
- for disk_op, disk_dict in self.op.disks:
+ for disk_op, _ in self.op.disks:
if disk_op == constants.DDM_REMOVE:
if len(instance.disks) == 1:
raise errors.OpPrereqError("Cannot remove the last disk of"
- " an instance",
- errors.ECODE_INVAL)
- ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
- ins_l = ins_l[pnode]
- msg = ins_l.fail_msg
- if msg:
- raise errors.OpPrereqError("Can't contact node %s: %s" %
- (pnode, msg), errors.ECODE_ENVIRON)
- if instance.name in ins_l.payload:
- raise errors.OpPrereqError("Instance is running, can't remove"
- " disks.", errors.ECODE_STATE)
+ " an instance", errors.ECODE_INVAL)
+ _CheckInstanceDown(self, instance, "cannot remove disks")
if (disk_op == constants.DDM_ADD and
-          len(instance.nics) >= constants.MAX_DISKS):
+          len(instance.disks) >= constants.MAX_DISKS):
(disk_op, len(instance.disks)),
errors.ECODE_INVAL)
+ # OS change
+ if self.op.os_name and not self.op.force:
+ _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
+ self.op.force_variant)
+
return
+ def _ConvertPlainToDrbd(self, feedback_fn):
+ """Converts an instance from plain to drbd.
+
+ """
+ feedback_fn("Converting template to drbd")
+ instance = self.instance
+ pnode = instance.primary_node
+ snode = self.op.remote_node
+
+ # create a fake disk info for _GenerateDiskTemplate
+ disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
+ new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
+ instance.name, pnode, [snode],
+ disk_info, None, None, 0)
+ info = _GetInstanceInfoText(instance)
+ feedback_fn("Creating aditional volumes...")
+ # first, create the missing data and meta devices
+ for disk in new_disks:
+      # unfortunately this is... not too nice: the primary gets only the
+      # new meta device, since its data LV is renamed from the old disk below
+ _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
+ info, True)
+ for child in disk.children:
+ _CreateSingleBlockDev(self, snode, instance, child, info, True)
+ # at this stage, all new LVs have been created, we can rename the
+ # old ones
+ feedback_fn("Renaming original volumes...")
+ rename_list = [(o, n.children[0].logical_id)
+ for (o, n) in zip(instance.disks, new_disks)]
+ result = self.rpc.call_blockdev_rename(pnode, rename_list)
+ result.Raise("Failed to rename original LVs")
+
+ feedback_fn("Initializing DRBD devices...")
+ # all child devices are in place, we can now create the DRBD devices
+ for disk in new_disks:
+ for node in [pnode, snode]:
+ f_create = node == pnode
+ _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
+
+ # at this point, the instance has been modified
+ instance.disk_template = constants.DT_DRBD8
+ instance.disks = new_disks
+ self.cfg.Update(instance, feedback_fn)
+
+ # disks are created, waiting for sync
+ disk_abort = not _WaitForSync(self, instance)
+ if disk_abort:
+ raise errors.OpExecError("There are some degraded disks for"
+ " this instance, please cleanup manually")
+
+ def _ConvertDrbdToPlain(self, feedback_fn):
+ """Converts an instance from drbd to plain.
+
+ """
+ instance = self.instance
+ assert len(instance.secondary_nodes) == 1
+ pnode = instance.primary_node
+ snode = instance.secondary_nodes[0]
+ feedback_fn("Converting template to plain")
+
+ old_disks = instance.disks
+ new_disks = [d.children[0] for d in old_disks]
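+    # for DRBD8, children[0] is the data LV and children[1] the metadata LV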
+
+ # copy over size and mode
+ for parent, child in zip(old_disks, new_disks):
+ child.size = parent.size
+ child.mode = parent.mode
+
+ # update instance structure
+ instance.disks = new_disks
+ instance.disk_template = constants.DT_PLAIN
+ self.cfg.Update(instance, feedback_fn)
+
+ feedback_fn("Removing volumes on the secondary node...")
+ for disk in old_disks:
+ self.cfg.SetDiskID(disk, snode)
+ msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
+ if msg:
+ self.LogWarning("Could not remove block device %s on node %s,"
+ " continuing anyway: %s", disk.iv_name, snode, msg)
+
+ feedback_fn("Removing unneeded volumes on the primary node...")
+ for idx, disk in enumerate(old_disks):
+ meta = disk.children[1]
+ self.cfg.SetDiskID(meta, pnode)
+ msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
+ if msg:
+ self.LogWarning("Could not remove metadata for disk %d on node %s,"
+ " continuing anyway: %s", idx, pnode, msg)
+
+
def Exec(self, feedback_fn):
"""Modifies an instance.
result = []
instance = self.instance
- cluster = self.cluster
# disk changes
for disk_op, disk_dict in self.op.disks:
if disk_op == constants.DDM_REMOVE:
# change a given disk
instance.disks[disk_op].mode = disk_dict['mode']
result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
+
+ if self.op.disk_template:
+ r_shut = _ShutdownInstanceDisks(self, instance)
+ if not r_shut:
+ raise errors.OpExecError("Cannot shutdow instance disks, unable to"
+ " proceed with disk template conversion")
+ mode = (instance.disk_template, self.op.disk_template)
+ try:
+ self._DISK_CONVERSIONS[mode](self, feedback_fn)
+ except:
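+        # free the DRBD minors reserved for the new disks before re-raising,
+        # so a failed conversion does not leak them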
+ self.cfg.ReleaseDRBDMinors(instance.name)
+ raise
+ result.append(("disk_template", self.op.disk_template))
+
# NIC changes
for nic_op, nic_dict in self.op.nics:
if nic_op == constants.DDM_REMOVE:
for key in 'mac', 'ip':
if key in nic_dict:
setattr(instance.nics[nic_op], key, nic_dict[key])
- if nic_op in self.nic_pnew:
- instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
+ if nic_op in self.nic_pinst:
+ instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
for key, val in nic_dict.iteritems():
result.append(("nic.%s/%d" % (key, nic_op), val))
for key, val in self.op.beparams.iteritems():
result.append(("be/%s" % key, val))
+ # OS change
+ if self.op.os_name:
+ instance.os = self.op.os_name
+
self.cfg.Update(instance, feedback_fn)
return result
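+  # maps (current disk template, new disk template) to conversion method;
+  # the values here are plain functions rather than bound methods, which is
+  # why Exec passes "self" explicitly when dispatching through this dict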
+ _DISK_CONVERSIONS = {
+ (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
+ (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
+ }
class LUQueryExports(NoHooksLU):
"""Query the exports list
"""Check the arguments.
"""
+ _CheckBooleanOpField(self.op, "remove_instance")
+ _CheckBooleanOpField(self.op, "ignore_remove_failures")
+
self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
constants.DEFAULT_SHUTDOWN_TIMEOUT)
+ self.remove_instance = getattr(self.op, "remove_instance", False)
+ self.ignore_remove_failures = getattr(self.op, "ignore_remove_failures",
+ False)
+
+ if self.remove_instance and not self.op.shutdown:
+ raise errors.OpPrereqError("Can not remove instance without shutting it"
+ " down before")
def ExpandNames(self):
self._ExpandAndLockInstance()
+
# FIXME: lock only instance primary and destination node
#
# Sad but true, for now we have do lock all nodes, as we don't know where
"EXPORT_NODE": self.op.target_node,
"EXPORT_DO_SHUTDOWN": self.op.shutdown,
"SHUTDOWN_TIMEOUT": self.shutdown_timeout,
+ # TODO: Generic function for boolean env variables
+ "REMOVE_INSTANCE": str(bool(self.remove_instance)),
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
"Cannot retrieve locked instance %s" % self.op.instance_name
_CheckNodeOnline(self, self.instance.primary_node)
- self.dst_node = self.cfg.GetNodeInfo(
- self.cfg.ExpandNodeName(self.op.target_node))
+ self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+ self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
+ assert self.dst_node is not None
- if self.dst_node is None:
- # This is wrong node name, not a non-locked node
- raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node,
- errors.ECODE_NOENT)
_CheckNodeOnline(self, self.dst_node.name)
_CheckNodeNotDrained(self, self.dst_node.name)
# instance disk type verification
+ # TODO: Implement export support for file-based disks
for disk in self.instance.disks:
if disk.dev_type == constants.LD_FILE:
raise errors.OpPrereqError("Export not supported for instances with"
feedback_fn("Shutting down instance %s" % instance.name)
result = self.rpc.call_instance_shutdown(src_node, instance,
self.shutdown_timeout)
+ # TODO: Maybe ignore failures if ignore_remove_failures is set
result.Raise("Could not shutdown instance %s on"
" node %s" % (instance.name, src_node))
snap_disks.append(new_dev)
finally:
- if self.op.shutdown and instance.admin_up:
+ if self.op.shutdown and instance.admin_up and not self.remove_instance:
feedback_fn("Starting instance %s" % instance.name)
result = self.rpc.call_instance_start(src_node, instance, None, None)
msg = result.fail_msg
feedback_fn("Exporting snapshot %s from %s to %s" %
(idx, src_node, dst_node.name))
if dev:
result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
- instance, cluster_name, idx)
+ instance, cluster_name,
+ idx, self.op.debug_level)
msg = result.fail_msg
if msg:
self.LogWarning("Could not export disk/%s from node %s to"
feedback_fn("Deactivating disks for %s" % instance.name)
_ShutdownInstanceDisks(self, instance)
+ # Remove instance if requested
+ if self.remove_instance:
+ feedback_fn("Removing instance %s" % instance.name)
+ _RemoveInstance(self, feedback_fn, instance, self.ignore_remove_failures)
+
nodelist = self.cfg.GetNodeList()
nodelist.remove(dst_node.name)
if msg:
self.LogWarning("Could not remove older export for instance %s"
" on node %s: %s", iname, node, msg)
+
return fin_resu, dresults
" Domain Name.")
-class TagsLU(NoHooksLU):
+class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
"""Generic tags LU.
This is an abstract class which is the parent of all the other tags LUs.
def ExpandNames(self):
self.needed_locks = {}
if self.op.kind == constants.TAG_NODE:
- name = self.cfg.ExpandNodeName(self.op.name)
- if name is None:
- raise errors.OpPrereqError("Invalid node name (%s)" %
- (self.op.name,), errors.ECODE_NOENT)
- self.op.name = name
- self.needed_locks[locking.LEVEL_NODE] = name
+ self.op.name = _ExpandNodeName(self.cfg, self.op.name)
+ self.needed_locks[locking.LEVEL_NODE] = self.op.name
elif self.op.kind == constants.TAG_INSTANCE:
- name = self.cfg.ExpandInstanceName(self.op.name)
- if name is None:
- raise errors.OpPrereqError("Invalid instance name (%s)" %
- (self.op.name,), errors.ECODE_NOENT)
- self.op.name = name
- self.needed_locks[locking.LEVEL_INSTANCE] = name
+ self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
+ self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
def CheckPrereq(self):
"""Check prerequisites.
easy usage
"""
+ # pylint: disable-msg=R0902
+ # lots of instance attributes
_ALLO_KEYS = [
- "mem_size", "disks", "disk_template",
+ "name", "mem_size", "disks", "disk_template",
"os", "tags", "nics", "vcpus", "hypervisor",
]
_RELO_KEYS = [
- "relocate_from",
+ "name", "relocate_from",
+ ]
+ _EVAC_KEYS = [
+ "evac_nodes",
]
- def __init__(self, cfg, rpc, mode, name, **kwargs):
+ def __init__(self, cfg, rpc, mode, **kwargs):
self.cfg = cfg
self.rpc = rpc
# init buffer variables
self.in_text = self.out_text = self.in_data = self.out_data = None
# init all input fields so that pylint is happy
self.mode = mode
- self.name = name
self.mem_size = self.disks = self.disk_template = None
self.os = self.tags = self.nics = self.vcpus = None
self.hypervisor = None
self.relocate_from = None
+ self.name = None
+ self.evac_nodes = None
# computed fields
self.required_nodes = None
# init result fields
- self.success = self.info = self.nodes = None
+ self.success = self.info = self.result = None
if self.mode == constants.IALLOCATOR_MODE_ALLOC:
keyset = self._ALLO_KEYS
+ fn = self._AddNewInstance
elif self.mode == constants.IALLOCATOR_MODE_RELOC:
keyset = self._RELO_KEYS
+ fn = self._AddRelocateInstance
+ elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
+ keyset = self._EVAC_KEYS
+ fn = self._AddEvacuateNodes
else:
raise errors.ProgrammerError("Unknown mode '%s' passed to the"
" IAllocator" % self.mode)
raise errors.ProgrammerError("Invalid input parameter '%s' to"
" IAllocator" % key)
setattr(self, key, kwargs[key])
+
for key in keyset:
if key not in kwargs:
raise errors.ProgrammerError("Missing input parameter '%s' to"
" IAllocator" % key)
- self._BuildInputData()
+ self._BuildInputData(fn)
def _ComputeClusterData(self):
"""Compute the generic allocator input data.
hypervisor_name = self.hypervisor
elif self.mode == constants.IALLOCATOR_MODE_RELOC:
hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
+ elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
+ hypervisor_name = cluster_info.enabled_hypervisors[0]
node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
hypervisor_name)
done.
"""
- data = self.in_data
-
disk_space = _ComputeDiskSize(self.disk_template, self.disks)
if self.disk_template in constants.DTS_NET_MIRROR:
else:
self.required_nodes = 1
request = {
- "type": "allocate",
"name": self.name,
"disk_template": self.disk_template,
"tags": self.tags,
"nics": self.nics,
"required_nodes": self.required_nodes,
}
- data["request"] = request
+ return request
def _AddRelocateInstance(self):
"""Add relocate instance data to allocator structure.
disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
request = {
- "type": "relocate",
"name": self.name,
"disk_space_total": disk_space,
"required_nodes": self.required_nodes,
"relocate_from": self.relocate_from,
}
- self.in_data["request"] = request
+ return request
+
+  def _AddEvacuateNodes(self):
+    """Add evacuate nodes data to allocator structure.
+
+    """
+    request = {
+      "evac_nodes": self.evac_nodes
+      }
+    return request
+
-  def _BuildInputData(self):
+  def _BuildInputData(self, fn):
"""Build input data structures.
"""
self._ComputeClusterData()
- if self.mode == constants.IALLOCATOR_MODE_ALLOC:
- self._AddNewInstance()
- else:
- self._AddRelocateInstance()
+ request = fn()
+ request["type"] = self.mode
+ self.in_data["request"] = request
self.in_text = serializer.Dump(self.in_data)
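+    # for the multi-evacuation mode the serialized input thus contains the
+    # generic cluster data plus a request of the form (sketch):
+    #   "request": {"type": self.mode, "evac_nodes": [...]}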
if not isinstance(rdict, dict):
raise errors.OpExecError("Can't parse iallocator results: not a dict")
- for key in "success", "info", "nodes":
+    # TODO: remove backwards compatibility in later versions
+ if "nodes" in rdict and "result" not in rdict:
+ rdict["result"] = rdict["nodes"]
+ del rdict["nodes"]
+
+ for key in "success", "info", "result":
if key not in rdict:
raise errors.OpExecError("Can't parse iallocator results:"
" missing key '%s'" % key)
setattr(self, key, rdict[key])
- if not isinstance(rdict["nodes"], list):
- raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
+ if not isinstance(rdict["result"], list):
+ raise errors.OpExecError("Can't parse iallocator results: 'result' key"
" is not a list")
self.out_data = rdict
if not hasattr(self.op, "name"):
raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
errors.ECODE_INVAL)
- fname = self.cfg.ExpandInstanceName(self.op.name)
- if fname is None:
- raise errors.OpPrereqError("Instance '%s' not found for relocation" %
- self.op.name, errors.ECODE_NOENT)
+ fname = _ExpandInstanceName(self.cfg, self.op.name)
self.op.name = fname
self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
+ elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
+ if not hasattr(self.op, "evac_nodes"):
+ raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
+ " opcode input", errors.ECODE_INVAL)
else:
raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
self.op.mode, errors.ECODE_INVAL)
vcpus=self.op.vcpus,
hypervisor=self.op.hypervisor,
)
- else:
+ elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
ial = IAllocator(self.cfg, self.rpc,
mode=self.op.mode,
name=self.op.name,
relocate_from=list(self.relocate_from),
)
+ elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
+ ial = IAllocator(self.cfg, self.rpc,
+ mode=self.op.mode,
+ evac_nodes=self.op.evac_nodes)
+ else:
+ raise errors.ProgrammerError("Uncatched mode %s in"
+ " LUTestAllocator.Exec", self.op.mode)
if self.op.direction == constants.IALLOCATOR_DIR_IN:
result = ial.in_text