root / lib / masterd / iallocator.py @ 653bc0f1
History | View | Annotate | Download (27.1 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Module implementing the iallocator code."""
|
23 |
|
24 |
from ganeti import compat |
25 |
from ganeti import constants |
26 |
from ganeti import errors |
27 |
from ganeti import ht |
28 |
from ganeti import outils |
29 |
from ganeti import opcodes |
30 |
import ganeti.rpc.node as rpc |
31 |
from ganeti import serializer |
32 |
from ganeti import utils |
33 |
|
34 |
import ganeti.masterd.instance as gmi |
35 |
|
36 |
|
37 |
# Validator: a (possibly empty) list of strings
_STRING_LIST = ht.TListOf(ht.TString)

# Validator: a list of jobs, each job a list of opcode dicts; only the
# three instance-move opcodes below are accepted as "OP_ID"
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
  # pylint: disable=E1101
  # Class '...' has no 'OP_ID' member
  "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                       opcodes.OpInstanceMigrate.OP_ID,
                       opcodes.OpInstanceReplaceDisks.OP_ID]),
  })))

# Validator for one entry of the "moved" part of a node-evacuation result;
# presumably (instance name, target group, list of nodes) — the shape is
# fixed here, the field semantics come from the iallocator protocol
_NEVAC_MOVED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(3),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TNonEmptyString,
                                ht.TListOf(ht.TNonEmptyString),
                                ])))
# Validator for one entry of the "failed" part of a node-evacuation result;
# a pair of (name, optional reason string)
_NEVAC_FAILED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(2),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TMaybeString,
                                ])))
# Complete node-evacuation result: (moved, failed, jobs)
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

# Common (parameter name, validator) pairs shared by request classes
_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
|
63 |
|
64 |
class _AutoReqParam(outils.AutoSlots):
  """Metaclass that derives a request class's slots from REQ_PARAMS.

  """
  @classmethod
  def _GetSlots(mcs, attrs):
    """Return the slot names declared in REQ_PARAMS.

    Also makes sure the class being created always carries a
    REQ_PARAMS attribute (defaulting to an empty list).

    """
    if "REQ_PARAMS" not in attrs:
      attrs["REQ_PARAMS"] = []
    slots = []
    for (slot, _) in attrs["REQ_PARAMS"]:
      slots.append(slot)
    return slots
76 |
|
77 |
class IARequestBase(outils.ValidatedSlots):
  """A generic IAllocator request object.

  Concrete request types subclass this and override:
    - MODE: one of the C{constants.VALID_IALLOCATOR_MODES} values
    - REQ_PARAMS: list of C{(name, validator)} pairs accepted as
      keyword arguments by the constructor
    - REQ_RESULT: an C{ht} validator for the allocator's answer

  """
  # Python 2 metaclass hook; _AutoReqParam derives __slots__ from REQ_PARAMS
  __metaclass__ = _AutoReqParam

  MODE = NotImplemented
  REQ_PARAMS = []
  REQ_RESULT = NotImplemented

  def __init__(self, **kwargs):
    """Constructor for IARequestBase.

    The constructor takes only keyword arguments and will set
    attributes on this object based on the passed arguments. As such,
    it means that you should not pass arguments which are not in the
    REQ_PARAMS attribute for this class.

    """
    outils.ValidatedSlots.__init__(self, **kwargs)

    # fail early on malformed requests
    self.Validate()

  def Validate(self):
    """Validates all parameters of the request.

    @raises errors.OpPrereqError: if a declared parameter is missing or
      rejected by its validator

    """
    assert self.MODE in constants.VALID_IALLOCATOR_MODES

    for (param, validator) in self.REQ_PARAMS:
      if not hasattr(self, param):
        raise errors.OpPrereqError("Request is missing '%s' parameter" % param,
                                   errors.ECODE_INVAL)

      value = getattr(self, param)
      if not validator(value):
        raise errors.OpPrereqError(("Request parameter '%s' has invalid"
                                    " type %s/value %s") %
                                    (param, type(value), value),
                                    errors.ECODE_INVAL)

  def GetRequest(self, cfg):
    """Gets the request data dict.

    @param cfg: The configuration instance

    """
    raise NotImplementedError

  def ValidateResult(self, ia, result):
    """Validates the result of a request.

    @param ia: The IAllocator instance
    @param result: The IAllocator run result
    @raises ResultValidationError: If validation fails

    """
    # only check the payload shape if the allocator reported success
    if ia.success and not self.REQ_RESULT(result):
      raise errors.ResultValidationError("iallocator returned invalid result,"
                                         " expected %s, got %s" %
                                         (self.REQ_RESULT, result))
139 |
|
140 |
class IAReqInstanceAlloc(IARequestBase):
  """Request for allocating a single new instance.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_ALLOC
  REQ_PARAMS = [
    _INST_NAME,
    ("memory", ht.TNonNegativeInt),
    ("spindle_use", ht.TNonNegativeInt),
    ("disks", ht.TListOf(ht.TDict)),
    ("disk_template", ht.TString),
    ("os", ht.TString),
    ("tags", _STRING_LIST),
    ("nics", ht.TListOf(ht.TDict)),
    ("vcpus", ht.TInt),
    ("hypervisor", ht.TString),
    ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
  REQ_RESULT = ht.TList

  def RequiredNodes(self):
    """Return the number of nodes this allocation needs.

    Internally mirrored disk templates require a primary and a
    secondary node; everything else needs only a primary.

    """
    if self.disk_template in constants.DTS_INT_MIRROR:
      return 2
    return 1

  def GetRequest(self, cfg):
    """Build the request dictionary for a new instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "nics": self.nics,
      "required_nodes": self.RequiredNodes(),
      "hypervisor": self.hypervisor,
      }
    # total disk space depends on the template (e.g. DRBD metadata)
    request["disk_space_total"] = gmi.ComputeDiskSize(self.disk_template,
                                                      self.disks)
    return request

  def ValidateResult(self, ia, result):
    """Check that the allocator returned the right number of nodes.

    """
    IARequestBase.ValidateResult(self, ia, result)

    if not ia.success:
      return

    wanted = self.RequiredNodes()
    if len(result) != wanted:
      raise errors.ResultValidationError("iallocator returned invalid number"
                                         " of nodes (%s), required %s" %
                                         (len(result), wanted))
205 |
|
206 |
class IAReqMultiInstanceAlloc(IARequestBase):
  """Request for allocating several new instances at once.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
  REQ_PARAMS = [
    ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
  _MASUCCESS = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _MAFAILED = ht.TListOf(ht.TNonEmptyString)
  REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                       ht.TItems([_MASUCCESS, _MAFAILED]))

  def GetRequest(self, cfg):
    """Collect the request dicts of all contained allocation requests.

    """
    sub_requests = []
    for sub in self.instances:
      sub_requests.append(sub.GetRequest(cfg))
    return {
      "instances": sub_requests,
      }
229 |
|
230 |
class IAReqRelocate(IARequestBase):
  """A relocation request.

  Asks the allocator for a new secondary node for an existing
  (mirrored) instance.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_RELOC
  REQ_PARAMS = [
    _INST_UUID,
    ("relocate_from_node_uuids", _STRING_LIST),
    ]
  REQ_RESULT = ht.TList

  def GetRequest(self, cfg):
    """Request a relocation of an instance.

    The checks for the completeness of the opcode must have already been
    done.

    @raises errors.ProgrammerError: if the instance UUID is unknown
    @raises errors.OpPrereqError: if the instance is not mirrored or has
      an unexpected number of secondary nodes

    """
    instance = cfg.GetInstanceInfo(self.inst_uuid)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.inst_uuid)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if (instance.disk_template in constants.DTS_INT_MIRROR and
        len(instance.secondary_nodes) != 1):
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)

    return {
      "name": instance.name,
      "disk_space_total": disk_space,
      "required_nodes": 1,
      "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
      }

  def ValidateResult(self, ia, result):
    """Validates the result of a relocation request.

    Beyond the base-class shape check, verify that the returned nodes
    stay within the node groups of the original placement.

    """
    IARequestBase.ValidateResult(self, ia, result)

    # map node name -> group UUID from the input data sent to the script
    node2group = dict((name, ndata["group"])
                      for (name, ndata) in ia.in_data["nodes"].items())

    fn = compat.partial(self._NodesToGroups, node2group,
                        ia.in_data["nodegroups"])

    instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
    # groups spanned by the original placement vs. the proposed one
    request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
                        ia.cfg.GetNodeNames([instance.primary_node]))
    result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))

    if ia.success and not set(result_groups).issubset(request_groups):
      raise errors.ResultValidationError("Groups of nodes returned by"
                                         " iallocator (%s) differ from original"
                                         " groups (%s)" %
                                         (utils.CommaJoin(result_groups),
                                          utils.CommaJoin(request_groups)))

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names
    @rtype: list of strings
    @return: sorted list of group names (group UUIDs for groups whose
      name cannot be resolved)

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)
330 |
|
331 |
class IAReqNodeEvac(IARequestBase):
  """A node evacuation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_NODE_EVAC
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("evac_mode", ht.TEvacMode),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Build the data dict for a node-evacuate request.

    """
    return dict(instances=self.instances,
                evac_mode=self.evac_mode)
352 |
|
353 |
class IAReqGroupChange(IARequestBase):
  """A group change request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_CHG_GROUP
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("target_groups", _STRING_LIST),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Build the data dict for a change-group request.

    """
    return dict(instances=self.instances,
                target_groups=self.target_groups)
374 |
|
375 |
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, req):
    """Initializes this class and computes the allocator input data.

    @param cfg: the cluster configuration instance
    @param rpc_runner: the RPC runner used to query nodes
    @param req: the request object (an L{IARequestBase} instance)

    """
    self.cfg = cfg
    self.rpc = rpc_runner
    self.req = req
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init result fields
    self.success = self.info = self.result = None

    self._BuildInputData(req)

  def _ComputeClusterDataNodeInfo(self, disk_templates, node_list,
                                  cluster_info, hypervisor_name):
    """Prepare and execute node info call.

    @type disk_templates: list of string
    @param disk_templates: the disk templates of the instances to be allocated
    @type node_list: list of strings
    @param node_list: list of nodes' UUIDs
    @type cluster_info: L{objects.Cluster}
    @param cluster_info: the cluster's information from the config
    @type hypervisor_name: string
    @param hypervisor_name: the hypervisor name
    @rtype: same as the result of the node info RPC call
    @return: the result of the node info RPC call

    """
    storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates)
    storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
                                                    node_list)
    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
    return self.rpc.call_node_info(node_list, storage_units, hvspecs)

  def _ComputeClusterData(self, disk_template=None):
    """Compute the generic allocator input data.

    @type disk_template: string
    @param disk_template: the disk template of the instances to be allocated;
      if not given, the cluster's first enabled disk template is used

    """
    cluster_info = self.cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": self.cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ninfo = self.cfg.GetAllNodesInfo()
    iinfo = self.cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data; only vm_capable nodes can receive instances
    node_list = [n.uuid for n in ninfo.values() if n.vm_capable]

    # pick hypervisor and whitelist depending on the request type
    if isinstance(self.req, IAReqInstanceAlloc):
      hypervisor_name = self.req.hypervisor
      node_whitelist = self.req.node_whitelist
    elif isinstance(self.req, IAReqRelocate):
      hypervisor_name = self.cfg.GetInstanceInfo(self.req.inst_uuid).hypervisor
      node_whitelist = None
    else:
      hypervisor_name = cluster_info.primary_hypervisor
      node_whitelist = None

    if not disk_template:
      disk_template = cluster_info.enabled_disk_templates[0]

    node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list,
                                                 cluster_info, hypervisor_name)

    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors,
                                       cluster_info.hvparams)

    data["nodegroups"] = self._ComputeNodeGroupData(self.cfg)

    config_ndata = self._ComputeBasicNodeData(self.cfg, ninfo, node_whitelist)
    data["nodes"] = self._ComputeDynamicNodeData(
      ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(self.cfg, cluster_info,
                                                  i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    @rtype: dict
    @return: a dict mapping group UUID to a dict of group attributes

    """
    cluster = cfg.GetClusterInfo()
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
      "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
      "tags": list(gdata.GetTags()),
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: (node dict, node config)

    """
    # fill in static (config-based) values; a node outside the whitelist
    # is presented as offline so the allocator never places instances on it
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": (ninfo.offline or
                  not (node_whitelist is None or
                       ninfo.name in node_whitelist)),
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
    """Extract an attribute from the hypervisor's node information.

    This is a helper function to extract data from the hypervisor's information
    about the node, as part of the result of a node_info query.

    @type hv_info: dict of strings
    @param hv_info: dictionary of node information from the hypervisor
    @type node_name: string
    @param node_name: name of the node
    @type attr: string
    @param attr: key of the attribute in the hv_info dictionary
    @rtype: integer
    @return: the value of the attribute
    @raises errors.OpExecError: if key not in dictionary or value not
      integer

    """
    if attr not in hv_info:
      raise errors.OpExecError("Node '%s' didn't return attribute"
                               " '%s'" % (node_name, attr))
    value = hv_info[attr]
    if not isinstance(value, int):
      raise errors.OpExecError("Node '%s' returned invalid value"
                               " for '%s': %s" %
                               (node_name, attr, value))
    return value

  @staticmethod
  def _ComputeStorageDataFromSpaceInfoByTemplate(
      space_info, node_name, disk_template):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type disk_template: string
    @param disk_template: the disk template to report space for
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
      free_spindles)
    @raises errors.OpExecError: if the node reported no space information
      for a storage-reporting disk template

    """
    storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
    if storage_type not in constants.STS_REPORT:
      # non-reporting storage types have no usable space data
      total_disk = total_spindles = 0
      free_disk = free_spindles = 0
    else:
      template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate(
          space_info, disk_template)
      if not template_space_info:
        # note: the two literals previously concatenated to "disktemplate"
        raise errors.OpExecError("Node '%s' didn't return space info for disk"
                                 " template '%s'" % (node_name, disk_template))
      total_disk = template_space_info["storage_size"]
      free_disk = template_space_info["storage_free"]

      total_spindles = 0
      free_spindles = 0
      if disk_template in constants.DTS_LVM:
        lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
            space_info, constants.ST_LVM_PV)
        if lvm_pv_info:
          total_spindles = lvm_pv_info["storage_size"]
          free_spindles = lvm_pv_info["storage_free"]
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type has_lvm: boolean
    @param has_lvm: whether or not LVM storage information is requested
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
      free_spindles)
    @raises errors.OpExecError: if LVM info was requested but the node
      did not report it

    """
    # TODO: replace this with proper storage reporting
    if has_lvm:
      lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
          space_info, constants.ST_LVM_VG)
      if not lvm_vg_info:
        raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
                                 % (node_name))
      total_disk = lvm_vg_info["storage_size"]
      free_disk = lvm_vg_info["storage_free"]
      lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
          space_info, constants.ST_LVM_PV)
      if not lvm_pv_info:
        raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
                                 % (node_name))
      total_spindles = lvm_pv_info["storage_size"]
      free_spindles = lvm_pv_info["storage_free"]
    else:
      # we didn't even ask the node for VG status, so use zeros
      total_disk = free_disk = 0
      total_spindles = free_spindles = 0
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
                             input_mem_free):
    """Compute memory used by primary instances.

    @rtype: tuple (int, int, int)
    @returns: A tuple of three integers: 1. the sum of memory used by primary
      instances on the node (including the ones that are currently down), 2.
      the sum of memory used by primary instances of the node that are up, 3.
      the amount of memory that is free on the node considering the current
      usage of the instances.

    """
    i_p_mem = i_p_up_mem = 0
    mem_free = input_mem_free
    for iinfo, beinfo in instance_list:
      if iinfo.primary_node == node_uuid:
        i_p_mem += beinfo[constants.BE_MAXMEM]
        if iinfo.name not in node_instances_info[node_uuid].payload:
          # instance not reported by the hypervisor: assume no memory used
          i_used_mem = 0
        else:
          i_used_mem = int(node_instances_info[node_uuid]
                           .payload[iinfo.name]["memory"])
        # memory the instance could still grow into counts as reserved
        i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
        mem_free -= max(0, i_mem_diff)

        if iinfo.admin_state == constants.ADMINST_UP:
          i_p_up_mem += beinfo[constants.BE_MAXMEM]
    return (i_p_mem, i_p_up_mem, mem_free)

  def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
                              node_results, disk_template):
    """Compute global node data.

    Merges the live (RPC-derived) node data into the static per-node
    dicts; offline nodes keep only their static data.

    @param node_results: the basic node structures as filled from the config

    """
    #TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nuuid, nresult in node_data.items():
      ninfo = node_cfg[nuuid]
      assert ninfo.name in node_results, "Missing basic data for node %s" % \
                                         ninfo.name

      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % ninfo.name)
        node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
                                ninfo.name)
        (_, space_info, (hv_info, )) = nresult.payload

        mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
                                                            "memory_free")

        (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
             i_list, node_iinfo, nuuid, mem_free)
        (total_disk, free_disk, total_spindles, free_spindles) = \
            self._ComputeStorageDataFromSpaceInfoByTemplate(
                space_info, ninfo.name, disk_template)

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_total"),
          "reserved_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_dom0"),
          "free_memory": mem_free,
          "total_disk": total_disk,
          "free_disk": free_disk,
          "total_spindles": total_spindles,
          "free_spindles": free_spindles,
          "total_cpus": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "cpu_total"),
          "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "cpu_dom0"),
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        # static data takes precedence over the dynamic values on key clash
        pnr_dyn.update(node_results[ninfo.name])
        node_results[ninfo.name] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cfg, cluster_info, i_list):
    """Compute global instance data.

    @rtype: dict
    @return: a dict mapping instance name to its allocator input dict

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
                 cfg.GetNodeNames(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode,
                   constants.IDISK_SPINDLES: dsk.spindles}
                  for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "disks_active": iinfo.disks_active,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
                                                    pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _BuildInputData(self, req):
    """Build input data structures.

    """
    request = req.GetRequest(self.cfg)
    disk_template = None
    if "disk_template" in request:
      disk_template = request["disk_template"]
    self._ComputeClusterData(disk_template=disk_template)

    request["type"] = req.MODE
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @type name: string
    @param name: name of the iallocator script to run
    @type validate: boolean
    @param validate: whether to validate the script's output
    @param call_fn: RPC function used to run the script; defaults to the
      iallocator runner on the master node

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    ial_params = self.cfg.GetDefaultIAllocatorParameters()

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text, ial_params)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    @raises errors.OpExecError: if the allocator output cannot be parsed
      or misses mandatory keys

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception as err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatiblity in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    self.req.ValidateResult(self, self.result)
    self.out_data = rdict