@param nodes: list of node names or None for all nodes
@rtype: list
@return: the list of nodes, sorted
- @raise errors.OpProgrammerError: if the nodes parameter is wrong type
+ @raise errors.ProgrammerError: if the nodes parameter is wrong type
"""
if not isinstance(nodes, list):
raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
" non-empty list of nodes whose name is to be expanded.")
- wanted = []
- for name in nodes:
- node = _ExpandNodeName(lu.cfg, name)
- wanted.append(node)
-
+ wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
return utils.NiceSort(wanted)
try:
ntime_merged = utils.MergeTime(ntime)
except (ValueError, TypeError):
- _ErrorIf(test, self.ENODETIME, node, "Node returned invalid time")
+ _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
ntime_diff = abs(nvinfo_starttime - ntime_merged)
elif field == "drain_flag":
entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
elif field == "watcher_pause":
- return utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
+ entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
else:
raise errors.ParameterError(field)
values.append(entry)
# TODO: make the ip check more flexible and not depend on the name check
raise errors.OpPrereqError("Cannot do ip checks without a name check",
errors.ECODE_INVAL)
+ if (self.op.disk_template == constants.DT_FILE and
+ not constants.ENABLE_FILE_STORAGE):
+ raise errors.OpPrereqError("File storage disabled at configure time",
+ errors.ECODE_INVAL)
def ExpandNames(self):
"""ExpandNames for CreateInstance.
self.needed_locks[locking.LEVEL_NODE].append(src_node)
if not os.path.isabs(src_path):
self.op.src_path = src_path = \
- os.path.join(constants.EXPORT_DIR, src_path)
+ utils.PathJoin(constants.EXPORT_DIR, src_path)
# On import force_variant must be True, because if we forced it at
# initial install, our only chance when importing it back is that it
" iallocator '%s': %s" %
(self.op.iallocator, ial.info),
errors.ECODE_NORES)
- if len(ial.nodes) != ial.required_nodes:
+ if len(ial.result) != ial.required_nodes:
raise errors.OpPrereqError("iallocator '%s' returned invalid number"
" of nodes (%s), required %s" %
- (self.op.iallocator, len(ial.nodes),
+ (self.op.iallocator, len(ial.result),
ial.required_nodes), errors.ECODE_FAULT)
- self.op.pnode = ial.nodes[0]
+ self.op.pnode = ial.result[0]
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
- utils.CommaJoin(ial.nodes))
+ utils.CommaJoin(ial.result))
if ial.required_nodes == 2:
- self.op.snode = ial.nodes[1]
+ self.op.snode = ial.result[1]
def BuildHooksEnv(self):
"""Build hooks env.
if src_path in exp_list[node].payload:
found = True
self.op.src_node = src_node = node
- self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
- src_path)
+ self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
+ src_path)
break
if not found:
raise errors.OpPrereqError("No export found for relative path %s" %
if export_info.has_option(constants.INISECT_INS, option):
# FIXME: are the old os-es, disk sizes, etc. useful?
export_name = export_info.get(constants.INISECT_INS, option)
- image = os.path.join(src_path, export_name)
+ image = utils.PathJoin(src_path, export_name)
disk_images.append(image)
else:
disk_images.append(False)
string_file_storage_dir = self.op.file_storage_dir
# build the full file storage dir path
- file_storage_dir = os.path.normpath(os.path.join(
- self.cfg.GetFileStorageDir(),
- string_file_storage_dir, instance))
+ file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
+ string_file_storage_dir, instance)
disks = _GenerateDiskTemplate(self,
" %s" % (iallocator_name, ial.info),
errors.ECODE_NORES)
- if len(ial.nodes) != ial.required_nodes:
+ if len(ial.result) != ial.required_nodes:
raise errors.OpPrereqError("iallocator '%s' returned invalid number"
" of nodes (%s), required %s" %
(iallocator_name,
- len(ial.nodes), ial.required_nodes),
+ len(ial.result), ial.required_nodes),
errors.ECODE_FAULT)
- remote_node_name = ial.nodes[0]
+ remote_node_name = ial.result[0]
lu.LogInfo("Selected new secondary for instance '%s': %s",
instance_name, remote_node_name)
(self.op.name, self.op.node_name))
+class LUNodeEvacuationStrategy(NoHooksLU):
+  """Computes the node evacuation strategy.
+
+  """
+  # "nodes" (list of node names to evacuate) is the only mandatory
+  # opcode attribute
+  _OP_REQP = ["nodes"]
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    # "remote_node" and "iallocator" are optional and mutually exclusive:
+    # either a fixed new secondary is given, or an allocator computes one
+    if not hasattr(self.op, "remote_node"):
+      self.op.remote_node = None
+    if not hasattr(self.op, "iallocator"):
+      self.op.iallocator = None
+    if self.op.remote_node is not None and self.op.iallocator is not None:
+      raise errors.OpPrereqError("Give either the iallocator or the new"
+                                 " secondary, not both", errors.ECODE_INVAL)
+
+  def ExpandNames(self):
+    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+    self.needed_locks = locks = {}
+    if self.op.remote_node is None:
+      # the allocator may pick any node, so all node locks are needed
+      locks[locking.LEVEL_NODE] = locking.ALL_SET
+    else:
+      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
+
+  def CheckPrereq(self):
+    # all argument checking is done in CheckArguments/ExpandNames
+    pass
+
+  def Exec(self, feedback_fn):
+    # With an explicit remote node, build [instance_name, remote_node]
+    # pairs for every secondary instance of the evacuated nodes;
+    # otherwise delegate the computation to the iallocator (MEVAC mode)
+    if self.op.remote_node is not None:
+      instances = []
+      for node in self.op.nodes:
+        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
+      result = []
+      for i in instances:
+        # using the instance's own primary as its new secondary is invalid
+        if i.primary_node == self.op.remote_node:
+          raise errors.OpPrereqError("Node %s is the primary node of"
+                                     " instance %s, cannot use it as"
+                                     " secondary" %
+                                     (self.op.remote_node, i.name),
+                                     errors.ECODE_INVAL)
+        result.append([i.name, self.op.remote_node])
+    else:
+      ial = IAllocator(self.cfg, self.rpc,
+                       mode=constants.IALLOCATOR_MODE_MEVAC,
+                       evac_nodes=self.op.nodes)
+      ial.Run(self.op.iallocator, validate=True)
+      if not ial.success:
+        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
+                                 errors.ECODE_NORES)
+      result = ial.result
+    return result
+
+
class LUGrowDisk(LogicalUnit):
"""Grow a disk of an instance.
# pylint: disable-msg=R0902
# lots of instance attributes
_ALLO_KEYS = [
- "mem_size", "disks", "disk_template",
+ "name", "mem_size", "disks", "disk_template",
"os", "tags", "nics", "vcpus", "hypervisor",
]
_RELO_KEYS = [
- "relocate_from",
+ "name", "relocate_from",
+ ]
+ _EVAC_KEYS = [
+ "evac_nodes",
]
- def __init__(self, cfg, rpc, mode, name, **kwargs):
+ def __init__(self, cfg, rpc, mode, **kwargs):
self.cfg = cfg
self.rpc = rpc
# init buffer variables
self.in_text = self.out_text = self.in_data = self.out_data = None
# init all input fields so that pylint is happy
self.mode = mode
- self.name = name
self.mem_size = self.disks = self.disk_template = None
self.os = self.tags = self.nics = self.vcpus = None
self.hypervisor = None
self.relocate_from = None
+ self.name = None
+ self.evac_nodes = None
# computed fields
self.required_nodes = None
# init result fields
- self.success = self.info = self.nodes = None
+ self.success = self.info = self.result = None
if self.mode == constants.IALLOCATOR_MODE_ALLOC:
keyset = self._ALLO_KEYS
+ fn = self._AddNewInstance
elif self.mode == constants.IALLOCATOR_MODE_RELOC:
keyset = self._RELO_KEYS
+ fn = self._AddRelocateInstance
+ elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
+ keyset = self._EVAC_KEYS
+ fn = self._AddEvacuateNodes
else:
raise errors.ProgrammerError("Unknown mode '%s' passed to the"
" IAllocator" % self.mode)
raise errors.ProgrammerError("Invalid input parameter '%s' to"
" IAllocator" % key)
setattr(self, key, kwargs[key])
+
for key in keyset:
if key not in kwargs:
raise errors.ProgrammerError("Missing input parameter '%s' to"
" IAllocator" % key)
- self._BuildInputData()
+ self._BuildInputData(fn)
def _ComputeClusterData(self):
"""Compute the generic allocator input data.
hypervisor_name = self.hypervisor
elif self.mode == constants.IALLOCATOR_MODE_RELOC:
hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
+ elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
+ hypervisor_name = cluster_info.enabled_hypervisors[0]
node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
hypervisor_name)
done.
"""
- data = self.in_data
-
disk_space = _ComputeDiskSize(self.disk_template, self.disks)
if self.disk_template in constants.DTS_NET_MIRROR:
else:
self.required_nodes = 1
request = {
- "type": "allocate",
"name": self.name,
"disk_template": self.disk_template,
"tags": self.tags,
"nics": self.nics,
"required_nodes": self.required_nodes,
}
- data["request"] = request
+ return request
def _AddRelocateInstance(self):
"""Add relocate instance data to allocator structure.
disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
request = {
- "type": "relocate",
"name": self.name,
"disk_space_total": disk_space,
"required_nodes": self.required_nodes,
"relocate_from": self.relocate_from,
}
- self.in_data["request"] = request
+ return request
- def _BuildInputData(self):
+  def _AddEvacuateNodes(self):
+    """Add evacuate nodes data to allocator structure.
+
+    @rtype: dict
+    @return: the partial allocator request, holding only the
+        C{evac_nodes} list (the "type" key is filled in later by
+        C{_BuildInputData})
+
+    """
+    request = {
+      "evac_nodes": self.evac_nodes
+      }
+    return request
+
+ def _BuildInputData(self, fn):
"""Build input data structures.
"""
self._ComputeClusterData()
- if self.mode == constants.IALLOCATOR_MODE_ALLOC:
- self._AddNewInstance()
- else:
- self._AddRelocateInstance()
+ request = fn()
+ request["type"] = self.mode
+ self.in_data["request"] = request
self.in_text = serializer.Dump(self.in_data)
if not isinstance(rdict, dict):
raise errors.OpExecError("Can't parse iallocator results: not a dict")
- for key in "success", "info", "nodes":
+    # TODO: remove backwards compatibility in later versions
+ if "nodes" in rdict and "result" not in rdict:
+ rdict["result"] = rdict["nodes"]
+ del rdict["nodes"]
+
+ for key in "success", "info", "result":
if key not in rdict:
raise errors.OpExecError("Can't parse iallocator results:"
" missing key '%s'" % key)
setattr(self, key, rdict[key])
- if not isinstance(rdict["nodes"], list):
- raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
+ if not isinstance(rdict["result"], list):
+ raise errors.OpExecError("Can't parse iallocator results: 'result' key"
" is not a list")
self.out_data = rdict
fname = _ExpandInstanceName(self.cfg, self.op.name)
self.op.name = fname
self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
+ elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
+ if not hasattr(self.op, "evac_nodes"):
+ raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
+ " opcode input", errors.ECODE_INVAL)
else:
raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
self.op.mode, errors.ECODE_INVAL)
vcpus=self.op.vcpus,
hypervisor=self.op.hypervisor,
)
- else:
+ elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
ial = IAllocator(self.cfg, self.rpc,
mode=self.op.mode,
name=self.op.name,
relocate_from=list(self.relocate_from),
)
+ elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
+ ial = IAllocator(self.cfg, self.rpc,
+ mode=self.op.mode,
+ evac_nodes=self.op.evac_nodes)
+ else:
+ raise errors.ProgrammerError("Uncatched mode %s in"
+ " LUTestAllocator.Exec", self.op.mode)
if self.op.direction == constants.IALLOCATOR_DIR_IN:
result = ial.in_text