#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
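
# A minimal usage sketch (the names and the resolved FQDN are
# hypothetical): the given name must be a prefix component of what it
# resolves to, otherwise OpPrereqError is raised:
#
#   hostname = _CheckHostnameSane(lu, "inst1")
#   # ok if "inst1" resolves to e.g. "inst1.example.com"
#   _CheckHostnameSane(lu, "other")  # raises OpPrereqError on mismatch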


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
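
# Illustrative sketch of the parameter flow above (values are made up):
# any opcode beparam set to "auto" is replaced by the cluster default
# before the dict is upgraded and filled, e.g.
#
#   op.beparams = {constants.BE_VCPUS: constants.VALUE_AUTO}
#   be_full = _ComputeFullBeParams(op, cluster)
#   # be_full[constants.BE_VCPUS] is now the cluster default, and any
#   # legacy "memory" key has been split into minmem/maxmem by
#   # objects.UpgradeBeParams before validation.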


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
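
# Illustrative sketch (hypothetical values) of the mapping above: each
# opcode NIC dict becomes an objects.NIC with a reserved MAC and a
# resolved mode, e.g.
#
#   op.nics = [{constants.INIC_IP: constants.VALUE_AUTO}]
#   nics = _ComputeNics(op, cluster, "192.0.2.10", cfg, ec_id)
#   # nics[0].ip == "192.0.2.10" (the default_ip from the name check) and
#   # nics[0].mac is still "auto"; the final MAC is generated later, in
#   # CheckPrereq, so the allocator and hooks see the real address.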


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
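
# Illustrative sketch (hypothetical spec values): the helper returns the
# list of violations computed by ComputeIPolicySpecViolation, so an empty
# result means the spec fits the policy, e.g.
#
#   ispec = {constants.ISPEC_MEM_SIZE: 128, constants.ISPEC_DISK_COUNT: 1,
#            constants.ISPEC_DISK_SIZE: [1024]}
#   res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
#                                              constants.DT_PLAIN)
#   # res == [] if the values fit the policy bounds, otherwise a list of
#   # human-readable violation messages.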


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
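
# Illustrative sketch (hypothetical OS names): variants are given with an
# "os+variant" suffix, so for an OS object whose supported_variants is
# ["lenny"]:
#
#   _CheckOSVariant(os_obj, "debootstrap+lenny")  # accepted
#   _CheckOSVariant(os_obj, "debootstrap")        # raises: variant missing
#   _CheckOSVariant(os_obj, "debootstrap+sid")    # raises: unsupported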


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)
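
  # Illustrative sketch of the VLAN syntax accepted above (IDs are made
  # up):
  #
  #   "100"      -> normalized to ".100" (untagged access port)
  #   ".100"     -> untagged VLAN 100
  #   ".100:200" -> untagged VLAN 100 plus tagged (trunk) VLAN 200
  #   ":100:200" -> trunk carrying tagged VLANs 100 and 200 only
  #   "abc"      -> raises OpPrereqError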

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
        [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
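
  # Illustrative sketch of the export file layout read above; the section
  # name and options follow the "disk%d_size"/"nic%d_%s" patterns used in
  # the code, but the concrete values are made up:
  #
  #   [instance]
  #   disk0_size = 10240
  #   nic0_mac = aa:00:00:fa:39:71
  #   nic0_ip = 192.0.2.10
  #   tags = web production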

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]
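
  # Illustrative sketch (hypothetical values) of the effect above, used
  # when the opcode sets identify_defaults:
  #
  #   cluster default: BE_VCPUS == 1
  #   self.op.beparams == {constants.BE_VCPUS: 1, constants.BE_MAXMEM: 512}
  #   after _RevertToDefaults: {constants.BE_MAXMEM: 512}
  #
  # i.e. the instance then tracks the cluster default instead of pinning
  # the value in its own configuration.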

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
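
  # Illustrative sketch of the resulting path (directories are made up):
  #
  #   cluster file storage dir: /srv/ganeti/file-storage
  #   op.file_storage_dir:      "web"  (optional)
  #   instance name:            inst1.example.com
  #   -> /srv/ganeti/file-storage/web/inst1.example.com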

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
def Exec(self, feedback_fn): |
1227 |
"""Create and add the instance to the cluster.
|
1228 |
|
1229 |
"""
|
1230 |
assert not (self.owned_locks(locking.LEVEL_NODE_RES) - |
1231 |
self.owned_locks(locking.LEVEL_NODE)), \
|
1232 |
"Node locks differ from node resource locks"
|
1233 |
assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC) |
1234 |
|
1235 |
ht_kind = self.op.hypervisor
|
1236 |
if ht_kind in constants.HTS_REQ_PORT: |
1237 |
network_port = self.cfg.AllocatePort()
|
1238 |
else:
|
1239 |
network_port = None
|
1240 |
|
1241 |
instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) |
1242 |
|
1243 |
# This is ugly but we got a chicken-egg problem here
|
1244 |
# We can only take the group disk parameters, as the instance
|
1245 |
# has no disks yet (we are generating them right here).
|
1246 |
nodegroup = self.cfg.GetNodeGroup(self.pnode.group) |
1247 |
disks = GenerateDiskTemplate(self,
|
1248 |
self.op.disk_template,
|
1249 |
instance_uuid, self.pnode.uuid,
|
1250 |
self.secondaries,
|
1251 |
self.disks,
|
1252 |
self.instance_file_storage_dir,
|
1253 |
self.op.file_driver,
|
1254 |
0,
|
1255 |
feedback_fn, |
1256 |
self.cfg.GetGroupDiskParams(nodegroup))
|
1257 |
|
1258 |
iobj = objects.Instance(name=self.op.instance_name,
|
1259 |
uuid=instance_uuid, |
1260 |
os=self.op.os_type,
|
1261 |
primary_node=self.pnode.uuid,
|
1262 |
nics=self.nics, disks=disks,
|
1263 |
disk_template=self.op.disk_template,
|
1264 |
disks_active=False,
|
1265 |
admin_state=constants.ADMINST_DOWN, |
1266 |
network_port=network_port, |
1267 |
beparams=self.op.beparams,
|
1268 |
hvparams=self.op.hvparams,
|
1269 |
hypervisor=self.op.hypervisor,
|
1270 |
osparams=self.op.osparams,
|
1271 |
) |
1272 |
|
1273 |
if self.op.tags: |
1274 |
for tag in self.op.tags: |
1275 |
iobj.AddTag(tag) |
1276 |
|
1277 |
if self.adopt_disks: |
1278 |
if self.op.disk_template == constants.DT_PLAIN: |
1279 |
# rename LVs to the newly-generated names; we need to construct
|
1280 |
# 'fake' LV disks with the old data, plus the new unique_id
|
1281 |
tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] |
1282 |
rename_to = [] |
1283 |
for t_dsk, a_dsk in zip(tmp_disks, self.disks): |
1284 |
rename_to.append(t_dsk.logical_id) |
1285 |
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
|
1286 |
result = self.rpc.call_blockdev_rename(self.pnode.uuid, |
1287 |
zip(tmp_disks, rename_to))
|
1288 |
result.Raise("Failed to rename adoped LVs")
|
1289 |
else:
|
1290 |
feedback_fn("* creating instance disks...")
|
1291 |
try:
|
1292 |
CreateDisks(self, iobj)
|
1293 |
except errors.OpExecError:
|
1294 |
self.LogWarning("Device creation failed") |
1295 |
self.cfg.ReleaseDRBDMinors(self.op.instance_name) |
1296 |
raise
|
1297 |
|
1298 |
feedback_fn("adding instance %s to cluster config" % self.op.instance_name) |
1299 |
|
1300 |
self.cfg.AddInstance(iobj, self.proc.GetECId()) |
1301 |
|
1302 |
# Declare that we don't want to remove the instance lock anymore, as we've
|
1303 |
# added the instance to the config
|
1304 |
del self.remove_locks[locking.LEVEL_INSTANCE] |
1305 |
|
1306 |
if self.op.mode == constants.INSTANCE_IMPORT: |
1307 |
# Release unused nodes
|
1308 |
ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid]) |
1309 |
else:
|
1310 |
# Release all nodes
|
1311 |
ReleaseLocks(self, locking.LEVEL_NODE)
|
1312 |
|
1313 |
disk_abort = False
|
1314 |
if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks: |
1315 |
feedback_fn("* wiping instance disks...")
|
1316 |
try:
|
1317 |
WipeDisks(self, iobj)
|
1318 |
except errors.OpExecError, err:
|
1319 |
logging.exception("Wiping disks failed")
|
1320 |
self.LogWarning("Wiping instance disks failed (%s)", err) |
1321 |
disk_abort = True
|
1322 |
|
1323 |
if disk_abort:
|
1324 |
# Something is already wrong with the disks, don't do anything else
|
1325 |
pass
|
1326 |
elif self.op.wait_for_sync: |
1327 |
disk_abort = not WaitForSync(self, iobj) |
1328 |
elif iobj.disk_template in constants.DTS_INT_MIRROR: |
1329 |
# make sure the disks are not degraded (still sync-ing is ok)
|
1330 |
feedback_fn("* checking mirrors status")
|
1331 |
disk_abort = not WaitForSync(self, iobj, oneshot=True) |
1332 |
else:
|
1333 |
disk_abort = False
|
1334 |
|
1335 |
    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add OS for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid,
                                            (iobj, None, None), False,
                                            self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shut down instance", feedback_fn)
    else:
      result.Raise("Could not shut down instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
        self, target_node.uuid, "moving instance %s" %
        self.instance.name, bep[constants.BE_MAXMEM],
        self.instance.hypervisor,
        cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the target node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shut down instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shut down instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    for idx, disk in enumerate(self.instance.disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
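    # With a non-empty instance list, compat.all(nodes) ^ has_nodes is true
    # exactly when some, but not all, entries specified nodes: either every
    # instance names its nodes or none may.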
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
          list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

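    # Illustrative shape of the partial result, assuming the usual key names
    # ("allocatable"/"failed"):
    #   {"allocatable": ["inst1.example.com"], "failed": ["inst2.example.com"]}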
    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

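    # Each allocatable instance is submitted as its own single-opcode job; the
    # partial result tells the caller which instances could be placed at all.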
    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

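  # E.g. (illustrative): [(constants.DDM_ADD, -1, {...})] becomes
  # [(constants.DDM_ADD, -1, {...}, <private object or None>)]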
  return [(op, idx, params, fn()) for (op, idx, params) in mods]


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
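  # hypervisor_specs holds (name, hvparams) pairs, e.g. (illustrative):
  #   [(constants.HT_KVM, cluster.hvparams[constants.HT_KVM])]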
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are"
                                 " required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
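  # Usage sketch (container is illustrative): "2" -> (2, container[2]);
  # "-1" -> (len(container) - 1, container[-1]); any other string is matched
  # against item names and UUIDs below.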
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
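  # Invocation sketch (callback names are illustrative):
  #   mods = _PrepareContainerMods(self.op.disks, None)
  #   _ApplyContainerMods("disk", instance.disks, chgdesc, mods,
  #                       create_fn, modify_fn, remove_fn)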
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
                                   " as identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
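  # E.g. _UpdateIvNames(1, disks[1:]) renames the remaining disks to
  # "disk/1", "disk/2", ... after an earlier disk was removed.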
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

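    # Legacy 2-tuples are upgraded in place, e.g. (illustrative):
    #   [("add", {...})] -> [(constants.DDM_ADD, -1, {...})]
    #   [(0, {...})]     -> [(constants.DDM_MODIFY, 0, {...})]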
    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
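    # Example of a valid DDM_ADD parameter dict (values illustrative):
    #   {constants.IDISK_SIZE: 1024, constants.IDISK_MODE: constants.DISK_RDWR}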
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
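    # Example of a valid DDM_ADD parameter dict (values illustrative):
    #   {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}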
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " may not be set", errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group (shared) to be able to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

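    # Stash the computed parameter sets on the per-modification private object
    # (see _InstNicModPrivate) so the later apply step can use them without
    # recomputing.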
    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
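    # Only conversions listed in self._DISK_CONVERSIONS are accepted; at this
    # point that effectively means plain <-> drbd, hence the DT_PLAIN
    # assertion further down.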
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

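    # For DT_EXT instances the external storage provider must be named on
    # "add" and must never change on "modify"; for all other templates the
    # parameter is rejected outright.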
# Check the validity of the `provider' parameter
|
2737 |
if self.instance.disk_template in constants.DT_EXT: |
2738 |
for mod in self.diskmod: |
2739 |
ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) |
2740 |
if mod[0] == constants.DDM_ADD: |
2741 |
if ext_provider is None: |
2742 |
raise errors.OpPrereqError("Instance template is '%s' and parameter" |
2743 |
" '%s' missing, during disk add" %
|
2744 |
(constants.DT_EXT, |
2745 |
constants.IDISK_PROVIDER), |
2746 |
errors.ECODE_NOENT) |
2747 |
elif mod[0] == constants.DDM_MODIFY: |
2748 |
if ext_provider:
|
2749 |
raise errors.OpPrereqError("Parameter '%s' is invalid during disk" |
2750 |
" modification" %
|
2751 |
constants.IDISK_PROVIDER, |
2752 |
errors.ECODE_INVAL) |
2753 |
else:
|
2754 |
for mod in self.diskmod: |
2755 |
ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) |
2756 |
if ext_provider is not None: |
2757 |
raise errors.OpPrereqError("Parameter '%s' is only valid for" |
2758 |
" instances of type '%s'" %
|
2759 |
(constants.IDISK_PROVIDER, |
2760 |
constants.DT_EXT), |
2761 |
errors.ECODE_INVAL) |
2762 |
|
2763 |
if not self.op.wait_for_sync and self.instance.disks_active: |
2764 |
for mod in self.diskmod: |
2765 |
if mod[0] == constants.DDM_ADD: |
2766 |
raise errors.OpPrereqError("Can't add a disk to an instance with" |
2767 |
" activated disks and"
|
2768 |
" --no-wait-for-sync given.",
|
2769 |
errors.ECODE_INVAL) |
2770 |
|
2771 |
if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS: |
2772 |
raise errors.OpPrereqError("Disk operations not supported for" |
2773 |
" diskless instances", errors.ECODE_INVAL)
|
2774 |
|
2775 |
def _PrepareDiskMod(_, disk, params, __): |
2776 |
disk.name = params.get(constants.IDISK_NAME, None)
|
2777 |
|
2778 |
# Verify disk changes (operating on a copy)
|
2779 |
disks = copy.deepcopy(self.instance.disks)
|
2780 |
_ApplyContainerMods("disk", disks, None, self.diskmod, None, |
2781 |
_PrepareDiskMod, None)
|
2782 |
utils.ValidateDeviceNames("disk", disks)
|
2783 |
if len(disks) > constants.MAX_DISKS: |
2784 |
raise errors.OpPrereqError("Instance has too many disks (%d), cannot add" |
2785 |
" more" % constants.MAX_DISKS,
|
2786 |
errors.ECODE_STATE) |
2787 |
disk_sizes = [disk.size for disk in self.instance.disks] |
2788 |
disk_sizes.extend(params["size"] for (op, idx, params, private) in |
2789 |
self.diskmod if op == constants.DDM_ADD) |
2790 |
ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
|
2791 |
ispec[constants.ISPEC_DISK_SIZE] = disk_sizes |
2792 |
|
2793 |
if self.op.offline is not None and self.op.offline: |
2794 |
CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE, |
2795 |
msg="can't change to offline")
|
2796 |
|
2797 |
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested parameter changes against the instance's
    current configuration and the state of the cluster.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      result.Raise("Hotplug is not supported.")

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new  # the new actual values
      self.hv_inst = i_hvdict  # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new  # the new actual values
      self.be_inst = i_bedict  # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

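    # Note on mask syntax (as accepted by utils.ParseMultiCpuMask): entries
    # are colon-separated, one per vCPU; e.g. "0-1:2-3" pins vCPU 0 to
    # physical CPUs 0-1 and vCPU 1 to CPUs 2-3, while a single-entry mask
    # applies to all vCPUs at once.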
    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict  # the new dict (without defaults)
    else:
      self.os_inst = {}

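    # When MAXMEM is being raised, make sure the instance can still fit on
    # its primary node (and, with auto_balance, fail over to each
    # secondary): the new maximum has to be covered by the node's free
    # memory plus whatever the instance currently uses.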
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor, cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None, hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failing over to its secondary"
                                       " node %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

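    # Runtime memory (ballooning) can only be changed on a running
    # instance; the target must stay within [MINMEM, MAXMEM] unless forced,
    # and growing the allocation requires the delta to fit into the
    # primary node's free memory.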
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor,
          cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload:  # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

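    # Check the resulting spec against the node group's instance policy
    # twice, once with MINMEM and once with MAXMEM, since either memory
    # bound may violate the policy on its own.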
    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

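  # Template conversion helpers: plain->drbd builds the new DRBD disk tree
  # next to the existing LVs, renames the old LVs into the new data-volume
  # slots (no data is copied) and only then creates the DRBD devices on
  # top; drbd->plain keeps the data LV and drops the DRBD layer.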
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info,
                             True, s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initialization of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please clean up manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks,
                                   self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # these are DRBD disks, return their ports to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution...")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

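  # The following callbacks are invoked by _ApplyContainerMods from Exec()
  # for each add/modify/remove entry; each returns the change descriptions
  # that end up in the job result (for NICs they are additionally
  # pre-computed in CheckPrereq for use in hooks).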
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
        self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
        private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(
          self.instance.primary_node, self.instance, self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

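    # Disk template conversion: the instance's disks must be shut down
    # first; on failure any DRBD minors reserved for the conversion are
    # released again before re-raising.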
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already
    # have been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

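  # Only plain<->drbd8 conversions are defined here; other template
  # combinations are expected to be rejected earlier by the checks in
  # CheckPrereq/_PreCheckDiskTemplate.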
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

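  # Node groups are locked optimistically: they are computed from the
  # instance's nodes before those nodes are themselves locked, so
  # CheckPrereq has to re-verify the groups once all locks are held.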
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

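  # Exec only computes the evacuation plan; the actual instance moves are
  # performed by the jobs returned via ResultWithJobs.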
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)