#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Utility functions, mainly (but not only) used by instance LUs."""

import logging
import os

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import network
from ganeti import objects
from ganeti import pathutils
from ganeti import utils
from ganeti.cmdlib.common import AnnotateDiskParams, \
  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled

def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }
  if nics:
    nic_count = len(nics)
    for idx, (name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if name:
        env["INSTANCE_NIC%d_NAME" % idx] = name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif network:
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (name, uuid, size, mode) in enumerate(disks):
      if name:
        env["INSTANCE_DISK%d_NAME" % idx] = name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env

def BuildInstanceHookEnvByObject(lu, instance, secondary_nodes=None,
                                 disks=None, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)

  # Override secondary_nodes
  if secondary_nodes is None:
    secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)

  # Override disks
  if disks is None:
    disks = lu.cfg.GetInstanceDisks(instance.uuid)

  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.name, disk.uuid, disk.size, disk.mode)
              for disk in disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args) # pylint: disable=W0142

def GetClusterDomainSecret(): |
202 |
"""Reads the cluster domain secret.
|
203 |
|
204 |
"""
|
205 |
return utils.ReadOneLineFile(pathutils.CLUSTER_DOMAIN_SECRET_FILE,
|
206 |
strict=True)
|
207 |
|
208 |
|
209 |
def CheckNodeNotDrained(lu, node_uuid): |
210 |
"""Ensure that a given node is not drained.
|
211 |
|
212 |
@param lu: the LU on behalf of which we make the check
|
213 |
@param node_uuid: the node to check
|
214 |
@raise errors.OpPrereqError: if the node is drained
|
215 |
|
216 |
"""
|
217 |
node = lu.cfg.GetNodeInfo(node_uuid) |
218 |
if node.drained:
|
219 |
raise errors.OpPrereqError("Can't use drained node %s" % node.name, |
220 |
errors.ECODE_STATE) |
221 |
|
222 |
|
def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node_uuid).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                               errors.ECODE_STATE)

def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance's disks")
  for disk in instance.disks:
    lu.cfg.RemoveInstanceDisk(instance.uuid, disk)

  logging.info("Removing instance %s out of cluster config", instance.name)
  lu.cfg.RemoveInstance(instance.uuid)

def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
      disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  anno_disks = AnnotateDiskParams(instance, inst_disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
    if len(inst_disks) > 0:
      file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1])
    else:
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result

def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  cluster = lu.cfg.GetClusterInfo()
  filled_params = cluster.SimpleFillNIC(nic.nicparams)
  mode = filled_params[constants.NIC_MODE]
  link = filled_params[constants.NIC_LINK]
  vlan = filled_params[constants.NIC_VLAN]
  netinfo = None
  if nic.network:
    nobj = lu.cfg.GetNetwork(nic.network)
    netinfo = objects.Network.ToDict(nobj)
  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, vlan,
          nic.network, netinfo)

def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  for nic in nics:
    hooks_nics.append(NICToTuple(lu, nic))
  return hooks_nics

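# Illustrative sketch (not part of the original module): each element produced
# by NICToTuple/NICListToTuple is a 9-tuple in the order
# (name, uuid, ip, mac, mode, link, vlan, net, netinfo), matching what
# BuildInstanceHookEnv expects for its "nics" argument. A hypothetical caller
# could unpack it like this:
#
#   for (name, uuid, ip, mac, mode, link, vlan, net, netinfo) in \
#       NICListToTuple(lu, instance.nics):
#     logging.debug("NIC %s: mac=%s mode=%s link=%s", name, mac, mode, link)
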
def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]

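# Illustrative sketch (assumption about a hypothetical LU): locking.ALL_SET is
# a sentinel rather than a plain list, so callers wanting a private copy of a
# lock list should go through CopyLockList instead of slicing directly:
#
#   self.needed_locks[locking.LEVEL_NODE_RES] = \
#     CopyLockList(self.needed_locks[locking.LEVEL_NODE])
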
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  logging.debug("Lu %s ReleaseLocks %s names=%s, keep=%s",
                lu.wconfdcontext, level, names, keep)
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  levelname = locking.LEVEL_NAMES[level]

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.WConfdClient().TryUpdateLocks(
      lu.release_request(level, release))
    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    lu.WConfdClient().FreeLocksLevel(levelname)

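# Illustrative sketch (assumption about a hypothetical LU): once an LU knows
# which nodes it really needs, it can drop every other node lock it acquired;
# "names" and "keep" are mutually exclusive, as the assertion above enforces.
#
#   ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.instance.primary_node])
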
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance, cfg)

def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

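# Illustrative sketch (assumption): before relocating an instance, a
# hypothetical LU could validate the target node's group policy, turning
# violations into either a warning or a hard failure. "ipolicy" is assumed to
# be the target group's instance policy obtained elsewhere, and
# "self.op.ignore_ipolicy" is a hypothetical opcode flag.
#
#   CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node,
#                          self.cfg, ignore=self.op.ignore_ipolicy)
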
def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name

def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                            prereq=True, ecode=errors.ECODE_ENVIRON)
  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload

  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem

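# Illustrative sketch (assumption): checking that an instance's minimum memory
# fits on its primary node before a start-like operation. The way the backend
# and hypervisor parameters are obtained below mirrors the FillBE() call used
# in BuildInstanceHookEnvByObject above, but is an assumption, not a quote of
# an actual caller.
#
#   cluster = lu.cfg.GetClusterInfo()
#   bep = cluster.FillBE(instance)
#   CheckNodeFreeMemory(lu, instance.primary_node,
#                       "starting instance %s" % instance.name,
#                       bep[constants.BE_MINMEM], instance.hypervisor,
#                       cluster.hvparams[instance.hypervisor])
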
def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  """
  if node_uuid is None:
    node_uuid = instance.primary_node
  CheckNicsBridgesExist(lu, instance.nics, node_uuid)

def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(node_uuid, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 lu.cfg.GetNodeName(node_uuid), prereq=True,
                 ecode=errors.ECODE_ENVIRON)

def UpdateMetadata(feedback_fn, rpc, instance,
                   osparams_public=None,
                   osparams_private=None,
                   osparams_secret=None):
  """Updates instance metadata on the metadata daemon on the
  instance's primary node.

  In case the RPC fails, this function simply issues a warning and
  proceeds normally.

  @type feedback_fn: callable
  @param feedback_fn: function used to send feedback back to the caller

  @type rpc: L{rpc.node.RpcRunner}
  @param rpc: RPC runner

  @type instance: L{objects.Instance}
  @param instance: instance for which the metadata should be updated

  @type osparams_public: NoneType or dict
  @param osparams_public: public OS parameters used to override those
                          defined in L{instance}

  @type osparams_private: NoneType or dict
  @param osparams_private: private OS parameters used to override those
                           defined in L{instance}

  @type osparams_secret: NoneType or dict
  @param osparams_secret: secret OS parameters used to override those
                          defined in L{instance}

  @rtype: NoneType
  @return: None

  """
  data = instance.ToDict()

  if osparams_public is not None:
    data["osparams_public"] = osparams_public

  if osparams_private is not None:
    data["osparams_private"] = osparams_private

  if osparams_secret is not None:
    data["osparams_secret"] = osparams_secret
  else:
    data["osparams_secret"] = {}

  result = rpc.call_instance_metadata_modify(instance.primary_node, data)
  result.Warn("Could not update metadata for instance '%s'" % instance.name,
              feedback_fn)

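# Illustrative sketch (assumption): after modifying an instance's private OS
# parameters, a caller refreshes the metadata daemon on the primary node; a
# failing RPC only results in a warning through feedback_fn, as documented
# above.
#
#   UpdateMetadata(feedback_fn, lu.rpc, instance,
#                  osparams_private=new_private_params)
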
def CheckCompressionTool(lu, compression_tool):
  """ Checks if the provided compression tool is allowed to be used.

  @type compression_tool: string
  @param compression_tool: Compression tool to use for importing or exporting
                           the instance

  @rtype: NoneType
  @return: None

  @raise errors.OpPrereqError: If the tool is not enabled by Ganeti or
                               whitelisted

  """
  allowed_tools = lu.cfg.GetCompressionTools()
  if (compression_tool != constants.IEC_NONE and
      compression_tool not in allowed_tools):
    raise errors.OpPrereqError(
      "Compression tool not allowed, tools allowed are [%s]"
      % ", ".join(allowed_tools)
    )
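
# Illustrative sketch (assumption): validating a user-requested compression
# tool before an import/export; constants.IEC_NONE is always accepted, any
# other value must be in the cluster's configured tool list. The
# "self.op.compress" attribute is a hypothetical opcode field.
#
#   CheckCompressionTool(self, self.op.compress)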