lib/cmdlib/common.py @ 4b75f8a4
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Common functions used by multiple logical units."""

import copy
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti.serializer import Private
import ganeti.rpc.node as rpc
from ganeti import ssconf
from ganeti import utils


# States of instance
INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]

#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
  constants.ADMINST_OFFLINE,
  ]))


def _ExpandItemName(expand_fn, name, kind):
  """Expand an item name.

  @param expand_fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the result of the expand_fn, if successful
  @raise errors.OpPrereqError: if the item is not found

  """
  (uuid, full_name) = expand_fn(name)
  if uuid is None or full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return (uuid, full_name)


def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The instance's UUID '%s' does not match the expected UUID '%s' for"
      " instance '%s'. Maybe the instance changed since you submitted this"
      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ExpandNodeUuidAndName(cfg, expected_uuid, name):
  """Expand a short node name into the node UUID and full name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type expected_uuid: string
  @param expected_uuid: expected UUID for the node (or None if there is no
        expectation). If it does not match, a L{errors.OpPrereqError} is
        raised.
  @type name: string
  @param name: the short node name

  """
  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The node's UUID '%s' does not match the expected UUID '%s' for node"
      " '%s'. Maybe the node changed since you submitted this job." %
      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)


def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instance_names: set or frozenset
  @param owned_instance_names: List of currently owned instances

  """
  wanted_instances = frozenset(cfg.GetInstanceNames(
                                 cfg.GetNodeGroupInstances(group_uuid)))
  if owned_instance_names != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instance_names)),
                               errors.ECODE_STATE)

  return wanted_instances


def GetWantedNodes(lu, short_node_names):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_node_names: list
  @param short_node_names: list of node names or None for all nodes
  @rtype: tuple of lists
  @return: tuple with (list of node UUIDs, list of node names)
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if short_node_names:
    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
                  for name in short_node_names]
  else:
    node_uuids = lu.cfg.GetNodeList()

  return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])


def GetWantedInstances(lu, short_inst_names):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_inst_names: list
  @param short_inst_names: list of instance names or None for all instances
  @rtype: tuple of lists
  @return: tuple of (instance UUIDs, instance names)
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if short_inst_names:
    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
                  for name in short_inst_names]
  else:
    inst_uuids = lu.cfg.GetInstanceList()
  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])


def RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
  except Exception, err: # pylint: disable=W0703
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_name, err)


def RedistributeAncillaryFiles(lu):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetMasterNodeInfo()

  online_node_uuids = lu.cfg.GetOnlineNodeList()
  online_node_uuid_set = frozenset(online_node_uuids)
  vm_node_uuids = list(online_node_uuid_set.intersection(
                         lu.cfg.GetVmCapableNodeList()))

  # Never distribute to master node
  for node_uuids in [online_node_uuids, vm_node_uuids]:
    if master_info.uuid in node_uuids:
      node_uuids.remove(master_info.uuid)

  # Gather file lists
  (files_all, _, files_mc, files_vm) = \
    ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (pathutils.CLUSTER_CONF_FILE in files_all or
              pathutils.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_node_uuids, files_all),
    (vm_node_uuids, files_vm),
    ]

  # Upload the files
  for (node_uuids, files) in filemap:
    for fname in files:
      UploadHelper(lu, node_uuids, fname)


def ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    ])

  if redist:
    # we need to ship at least the RAPI certificate
    files_all.add(pathutils.RAPI_CERT_FILE)
  else:
    files_all.update(pathutils.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(pathutils.ETC_HOSTS)

  if cluster.use_external_mip_script:
    files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)

  # Files which are optional, these must:
  # - be present in one other category as well
  # - either exist or not exist on all nodes of that category (mc, vm all)
  files_opt = set([
    pathutils.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()

  if not redist:
    files_mc.add(pathutils.CLUSTER_CONF_FILE)

  # File storage
  if (not redist and (cluster.IsFileStorageEnabled() or
                      cluster.IsSharedFileStorageEnabled())):
    files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
    files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])

  files_opt |= set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])

  # Filenames in each category must be unique
  all_files_set = files_all | files_mc | files_vm
  assert (len(all_files_set) ==
          sum(map(len, [files_all, files_mc, files_vm]))), \
    "Found file listed in more than one file list"

  # Optional files must be present in one other category
  assert all_files_set.issuperset(files_opt), \
    "Optional file not in a different required list"

  # This one file should never ever be re-distributed via RPC
  assert not (redist and
              pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)

  return (files_all, files_opt, files_mc, files_vm)


def UploadHelper(lu, node_uuids, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(node_uuids, fname)
    for to_node_uuids, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
        lu.LogWarning(msg)


def MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with the one of the object

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


def MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with the one of the object

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict
  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None


def CheckOSParams(lu, required, node_uuids, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  # Last chance to unwrap private elements.
  for key in osparams:
    if isinstance(osparams[key], Private):
      osparams[key] = osparams[key].Get()

  result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node_uuid, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, lu.cfg.GetNodeName(node_uuid))


def CheckHVParams(lu, node_uuids, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  cluster = lu.cfg.GetClusterInfo()
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)

  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
  for node_uuid in node_uuids:
    info = hvinfo[node_uuid]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))


def AdjustCandidatePool(lu, exceptions, feedback_fn):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for node in mod_list:
      lu.context.ReaddNode(node)
      cluster = lu.cfg.GetClusterInfo()
      AddNodeCertToCandidateCerts(lu, node.uuid, cluster)
      lu.cfg.Update(cluster, feedback_fn)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def CheckNodePVs(nresult, exclusive_storage):
  """Check node PVs.

  """
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
  if pvlist_dict is None:
    return (["Can't get PV list from node"], None)
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
  errlist = []
  # check that ':' is not present in PV names, since it's a
  # special character for lvcreate (denotes the range of PEs to
  # use on the PV)
  for pv in pvlist:
    if ":" in pv.name:
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                     (pv.name, pv.vg_name))
  es_pvinfo = None
  if exclusive_storage:
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
    errlist.extend(errmsgs)
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
    if shared_pvs:
      for (pvname, lvlist) in shared_pvs:
        # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
                       (pvname, utils.CommaJoin(lvlist)))
  return (errlist, es_pvinfo)


def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ispecs: dictionary containing min and max values
  @param value: actual value that we want to use
  @return: None or an error string

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None


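# Illustrative sketch (not part of the original module): given an ispecs dict
# of the shape used above, _ComputeMinMaxSpec only reports values that fall
# outside the [min, max] interval and silently accepts None/"auto" values:
#
#   specs = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
#            constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 4096}}
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", specs, 512)   # -> None
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", specs, 8192)  # -> error
#   _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", specs, None)  # -> None

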
def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                nic_count, disk_sizes, spindle_use,
                                disk_template,
                                _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_template: string
  @param disk_template: The disk template of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]
  if disk_template != constants.DT_DISKLESS:
    # This check doesn't make sense for diskless instances
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
  ret = []
  allowed_dts = ipolicy[constants.IPOLICY_DTS]
  if disk_template not in allowed_dts:
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
               (disk_template, utils.CommaJoin(allowed_dts)))

  min_errs = None
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
    errs = filter(None,
                  (_compute_fn(name, qualifier, minmax, value)
                   for (name, qualifier, value) in test_settings))
    if min_errs is None or len(errs) < len(min_errs):
      min_errs = errs
  assert min_errs is not None
  return ret + min_errs


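# Illustrative sketch (not part of the original module): with a hypothetical
# policy that allows only DRBD and carries a single min/max pair (e.g. the
# "specs" sketch above wrapped in a list), checking a plain-disk instance
# whose memory exceeds the maximum yields two violation strings, one for the
# disk template and one for the memory size:
#
#   policy = {constants.IPOLICY_DTS: [constants.DT_DRBD8],
#             constants.ISPECS_MINMAX: [specs]}
#   ComputeIPolicySpecViolation(policy, 8192, 1, 1, 1, [1024], 1,
#                               constants.DT_PLAIN)
#   # -> ["Disk template plain is not allowed ...",
#   #     "... value 8192 is not in range [128, 4096]"]

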
def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                    _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  ret = []
  be_full = cfg.GetClusterInfo().FillBE(instance)
  mem_size = be_full[constants.BE_MAXMEM]
  cpu_count = be_full[constants.BE_VCPUS]
  es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
  if any(es_flags.values()):
    # With exclusive storage use the actual spindles
    try:
      spindle_use = sum([disk.spindles for disk in instance.disks])
    except TypeError:
      ret.append("Number of spindles not configured for disks of instance %s"
                 " while exclusive storage is enabled, try running gnt-cluster"
                 " repair-disk-sizes" % instance.name)
      # _ComputeMinMaxSpec ignores 'None's
      spindle_use = None
  else:
    spindle_use = be_full[constants.BE_SPINDLE_USE]
  disk_count = len(instance.disks)
  disk_sizes = [disk.size for disk in instance.disks]
  nic_count = len(instance.nics)
  disk_template = instance.disk_template

  return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                           disk_sizes, spindle_use, disk_template)


def _ComputeViolatingInstances(ipolicy, instances, cfg):
  """Computes a set of instances that violate the given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: L{objects.Instance}
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])


def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A list of instances which violate the new ipolicy but
      did not before

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
          _ComputeViolatingInstances(old_ipolicy, instances, cfg))


def GetUpdatedParams(old_params, update_dict,
                     use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


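# Illustrative sketch (not part of the original module): GetUpdatedParams
# merges an update dict into a copy of the old parameters; keys whose new
# value is constants.VALUE_DEFAULT (or None, with use_none=True) are dropped
# so that they fall back to the cluster defaults. With hypothetical values:
#
#   old = {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda1"}
#   upd = {"root_path": constants.VALUE_DEFAULT, "serial_console": True}
#   GetUpdatedParams(old, upd)
#   # -> {"kernel_path": "/boot/vmlinuz", "serial_console": True}

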
def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
    we should support removal of policy entries

  """
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if (not value or value == [constants.VALUE_DEFAULT] or
        value == constants.VALUE_DEFAULT):
      if group_policy:
        if key in ipolicy:
          del ipolicy[key]
      else:
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                   " on the cluster" % key,
                                   errors.ECODE_INVAL)
    else:
      if key in constants.IPOLICY_PARAMETERS:
        # FIXME: we assume all such values are float
        try:
          ipolicy[key] = float(value)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid value for attribute"
                                     " '%s': '%s', error: %s" %
                                     (key, value, err), errors.ECODE_INVAL)
      elif key == constants.ISPECS_MINMAX:
        for minmax in value:
          for k in minmax.keys():
            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
        ipolicy[key] = value
      elif key == constants.ISPECS_STD:
        if group_policy:
          msg = "%s cannot appear in group instance specs" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=False, use_default=False)
        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
      else:
        # FIXME: we assume all others are lists; this should be redone
        # in a nicer way
        ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


def AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of its children!)
  @param cfg: The config object
  @returns The annotated disk copies
  @see L{rpc.node.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))


def SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @returns: A new dict with updated and verified values

  """
  def fn(old, value):
    new = GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))
  return ret


def _FilterVmNodes(lu, node_uuids):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [uuid for uuid in node_uuids if uuid not in vm_nodes]


def GetDefaultIAllocator(cfg, ialloc):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type ialloc: string or None
  @param ialloc: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not ialloc:
    # Use default iallocator
    ialloc = cfg.GetDefaultIAllocator()

  if not ialloc:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return ialloc


def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
                             cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance UUID as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_node_uuids: iterable of string
  @param owned_node_uuids: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (uuid, inst) in instances.items():
    assert owned_node_uuids.issuperset(inst.all_nodes), \
      "Instance %s's nodes changed while we kept the lock" % inst.name

    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)


def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type inst_uuid: string
  @param inst_uuid: Instance UUID
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups
  @type primary_only: boolean
  @param primary_only: Whether to check node groups for only the primary node

  """
  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups are"
                               " '%s', owning groups '%s'; retry the"
                               " operation" %
                               (cfg.GetInstanceName(inst_uuid),
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
                                 for (name, reason) in failed)
    lu.LogWarning("Unable to evacuate instances %s", failreason)
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin(
                 "%s (to %s)" %
                 (name, _NodeEvacDest(use_nodes, group, node_names))
                 for (name, group, node_names) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


def _NodeEvacDest(use_nodes, group, node_names):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(node_names)
  else:
    return group


def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def MapInstanceLvsToNodes(instances):
  """Creates a map from (node, volume) to the owning instance.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
          object as value

  """
  return dict(((node_uuid, vol), inst)
              for inst in instances
              for (node_uuid, vols) in inst.MapLVsByNode().items()
              for vol in vols)


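# Illustrative sketch (not part of the original module): the mapping returned
# by MapInstanceLvsToNodes is keyed by (node UUID, volume name) tuples, so a
# caller can find which instance owns a given LV on a given node, e.g. with
# hypothetical values:
#
#   node_vol_map = MapInstanceLvsToNodes(instances)
#   owner = node_vol_map.get(("node-uuid-1", "xenvg/disk-0"))
#   # -> the owning objects.Instance, or None if the volume is unknown

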
def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Make sure that none of the given parameters is global.

  If a global parameter is found, an L{errors.OpPrereqError} exception is
  raised. This is used to avoid setting global parameters for individual nodes.

  @type params: dictionary
  @param params: Parameters to check
  @type glob_pars: dictionary
  @param glob_pars: Forbidden parameters
  @type kind: string
  @param kind: Kind of parameters (e.g. "node")
  @type bad_levels: string
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
      "instance")
  @type good_levels: strings
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
      "cluster or group")

  """
  used_globals = glob_pars.intersection(params)
  if used_globals:
    msg = ("The following %s parameters are global and cannot"
           " be customized at %s level, please modify them at"
           " %s level: %s" %
           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def IsExclusiveStorageEnabledNode(cfg, node):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @rtype: bool
  @return: The effective value of exclusive_storage

  """
  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]


def CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = ("can't use instance from outside %s states" %
           utils.CommaJoin(req_states))
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode_uuid = instance.primary_node
    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
      all_hvparams = lu.cfg.GetClusterInfo().hvparams
      ins_l = lu.rpc.call_instance_list(
                [pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
      ins_l.Raise("Can't contact node %s for instance information" %
                  lu.cfg.GetNodeName(pnode_uuid),
                  prereq=True, ecode=errors.ECODE_ENVIRON)
      if instance.name in ins_l.payload:
        raise errors.OpPrereqError("Instance %s is running, %s" %
                                   (instance.name, msg), errors.ECODE_STATE)
    else:
      lu.LogWarning("Primary node offline, ignoring check that instance"
                    " is down")


def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
  then the LU's opcode's iallocator slot is filled with the cluster-wide
  default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  ialloc = getattr(lu.op, iallocator_slot, None)
  if node == []:
    node = None

  if node is not None and ialloc is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif ((node is None and ialloc is None) or
        ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
  """Returns the indices of the instance's disks reported as faulty on a node.

  """
  faulty = []

  result = rpc_runner.call_blockdev_getmirrorstatus(
             node_uuid, (instance.disks, instance))
  result.Raise("Failed to get disk status from node %s" %
               cfg.GetNodeName(node_uuid),
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def CheckNodeOnline(lu, node_uuid, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node_uuid).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
                               errors.ECODE_STATE)


def CheckDiskTemplateEnabled(cluster, disk_template):
  """Helper function to check if a disk template is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type disk_template: str
  @param disk_template: the disk template to be checked

  """
  assert disk_template is not None
  if disk_template not in constants.DISK_TEMPLATES:
    raise errors.OpPrereqError("'%s' is not a valid disk template."
                               " Valid disk templates are: %s" %
                               (disk_template,
                                ",".join(constants.DISK_TEMPLATES)))
  if not disk_template in cluster.enabled_disk_templates:
    raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
                               " Enabled disk templates are: %s" %
                               (disk_template,
                                ",".join(cluster.enabled_disk_templates)))


def CheckStorageTypeEnabled(cluster, storage_type):
  """Helper function to check if a storage type is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type storage_type: str
  @param storage_type: the storage type to be checked

  """
  assert storage_type is not None
  assert storage_type in constants.STORAGE_TYPES
  # special case for lvm-pv, because it cannot be enabled
  # via disk templates
  if storage_type == constants.ST_LVM_PV:
    CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
  else:
    possible_disk_templates = \
        utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
    for disk_template in possible_disk_templates:
      if disk_template in cluster.enabled_disk_templates:
        return
    raise errors.OpPrereqError("No disk template of storage type '%s' is"
                               " enabled in this cluster. Enabled disk"
                               " templates are: %s" % (storage_type,
                               ",".join(cluster.enabled_disk_templates)))


def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
  """Checks ipolicy disk templates against enabled disk templates.

  @type ipolicy: dict
  @param ipolicy: the new ipolicy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates on the
      cluster
  @raises errors.OpPrereqError: if there is at least one allowed disk
      template that is not also enabled.

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
  if not_enabled:
    raise errors.OpPrereqError("The following disk templates are allowed"
                               " by the ipolicy, but not enabled on the"
                               " cluster: %s" % utils.CommaJoin(not_enabled))


def CheckDiskAccessModeValidity(parameters):
  """Checks if the access parameter is legal.

  @see: L{CheckDiskAccessModeConsistency} for cluster consistency checks.
  @raise errors.OpPrereqError: if the check fails.

  """
  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)
    if access not in constants.DISK_VALID_ACCESS_MODES:
      valid_vals_str = utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
      raise errors.OpPrereqError("Invalid value of '{d}:{a}': '{v}' (expected"
                                 " one of {o})".format(d=disk_template,
                                                       a=constants.LDP_ACCESS,
                                                       v=access,
                                                       o=valid_vals_str))


def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
  """Checks if the access param is consistent with the cluster configuration.

  @note: requires a configuration lock to run.
  @param parameters: the parameters to validate
  @param cfg: the cfg object of the cluster
  @param group: if set, only check for consistency within this group.
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an invalid value, such as "pink bunny".
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
                               to an inconsistent value, such as asking for RBD
                               userspace access to the chroot hypervisor.

  """
  CheckDiskAccessModeValidity(parameters)

  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)

    if disk_template not in constants.DTS_HAVE_ACCESS:
      continue

    # Check the combination of instance hypervisor, disk template and access
    # protocol is sane.
    inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
      cfg.GetInstanceList()

    for entry in inst_uuids:
      inst = cfg.GetInstanceInfo(entry)
      hv = inst.hypervisor
      dt = inst.disk_template

      if not IsValidDiskAccessModeCombination(hv, dt, access):
        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
                                   " setting with {h} hypervisor and {d} disk"
                                   " type.".format(i=inst.name,
                                                   a=access,
                                                   h=hv,
                                                   d=dt))


def IsValidDiskAccessModeCombination(hv, disk_template, mode):
  """Checks if a hypervisor can access a disk template with the given mode.

  @param hv: the hypervisor that will access the data
  @param disk_template: the disk template the data is stored as
  @param mode: how the hypervisor should access the data
  @return: True if the hypervisor can access the given disk_template
           in the specified mode.

  """
  if mode == constants.DISK_KERNELSPACE:
    return True

  if (hv == constants.HT_KVM and
      disk_template in (constants.DT_RBD, constants.DT_GLUSTER) and
      mode == constants.DISK_USERSPACE):
    return True

  # Everything else:
  return False


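# Illustrative sketch (not part of the original module): kernelspace access is
# always accepted, while userspace access is currently limited to KVM with RBD
# or Gluster disks:
#
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)    # -> True
#   IsValidDiskAccessModeCombination(constants.HT_XEN_PVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)    # -> False

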
def AddNodeCertToCandidateCerts(lu, node_uuid, cluster):
  """Add the node's client SSL certificate digest to the candidate certs.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration

  """
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST, constants.CRYPTO_ACTION_GET,
               None)])
  result.Raise("Could not retrieve the node's (uuid %s) SSL digest."
               % node_uuid)
  ((crypto_type, digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST

  utils.AddNodeToCandidateCerts(node_uuid, digest, cluster.candidate_certs)


def RemoveNodeCertFromCandidateCerts(node_uuid, cluster):
  """Removes the node's certificate from the candidate certificates list.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration

  """
  utils.RemoveNodeFromCandidateCerts(node_uuid, cluster.candidate_certs)


def CreateNewClientCert(lu, node_uuid, filename=None):
  """Creates a new client SSL certificate for the node.

  @type node_uuid: string
  @param node_uuid: the node's UUID
  @type filename: string
  @param filename: the certificate's filename
  @rtype: string
  @return: the digest of the newly created certificate

  """
  options = {}
  if filename:
    options[constants.CRYPTO_OPTION_CERT_FILE] = filename
  result = lu.rpc.call_node_crypto_tokens(
             node_uuid,
             [(constants.CRYPTO_TYPE_SSL_DIGEST,
               constants.CRYPTO_ACTION_CREATE,
               options)])
  result.Raise("Could not create the node's (uuid %s) SSL client"
               " certificate." % node_uuid)
  ((crypto_type, new_digest), ) = result.payload
  assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST
  return new_digest


def AddInstanceCommunicationNetworkOp(network):
  """Create an OpCode that adds the instance communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type network: string
  @param network: name or UUID of the instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that creates the instance communication network

  """
  return opcodes.OpNetworkAdd(
    network_name=network,
    gateway=None,
    network=constants.INSTANCE_COMMUNICATION_NETWORK4,
    gateway6=None,
    network6=constants.INSTANCE_COMMUNICATION_NETWORK6,
    mac_prefix=constants.INSTANCE_COMMUNICATION_MAC_PREFIX,
    add_reserved_ips=None,
    conflicts_check=True,
    tags=[])


def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
  """Create an OpCode that connects a group to the instance
  communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type group_uuid: string
  @param group_uuid: UUID of the group to connect

  @type network: string
  @param network: name or UUID of the network to connect to, i.e., the
                  instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that connects the group to the instance
           communication network

  """
  return opcodes.OpNetworkConnect(
    group_name=group_uuid,
    network_name=network,
    network_mode=constants.NIC_MODE_ROUTED,
    network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
    conflicts_check=True)