#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
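
# A value matching this type would look like, e.g.,
# [("nic.0", "add"), ("disk/1", "remove")]: a list of
# (item, change description) pairs, or None when there are no changes.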


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
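
# For example (hypothetical resolver data): a given name "web" that
# resolves to "web.example.com" passes the prefix check and the
# L{netutils.Hostname} object is returned, while a resolution to
# "db.example.com" instead would raise OpPrereqError.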


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)
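
# Typical use, mirroring _RunAllocator() below:
#
#   req = _CreateInstanceAllocRequest(op, disks, nics, beparams, None)
#   ial = iallocator.IAllocator(cfg, rpc, req)
#   ial.Run(op.iallocator)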


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
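
# Sketch with hypothetical values: given cluster defaults
# {"maxmem": 1024, "vcpus": 1} and op.beparams {"maxmem": "auto",
# "vcpus": 4}, the "auto" entry is replaced by the cluster default
# (1024) before type-checking, and the returned dict layers the opcode
# values on top of the cluster defaults.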


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)
      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)
      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
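
# Example (hypothetical opcode data): a NIC dict such as
#   {constants.INIC_IP: constants.NIC_IP_POOL,
#    constants.INIC_NETWORK: "net1"}
# is accepted here with the pool marker kept as-is; the actual address is
# only drawn from the pool later, in CheckPrereq, once the primary node
# (and thus the connected network) is known.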


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether an instance spec meets the specs of an ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
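
# The result is the (possibly empty) list of violated policy constraints;
# LUInstanceCreate.CheckPrereq below turns a non-empty result into an
# OpPrereqError unless the opcode sets ignore_ipolicy.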


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
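
# OS variants use the "name+variant" syntax handled by
# L{objects.OS.GetVariant}; e.g. (hypothetical OS definitions)
# "debootstrap+default" names the "default" variant, while a plain
# "debootstrap" fails the check when the OS declares variants.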


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get
      # from the iallocator, which wants the disk template as input. To solve
      # this chicken-and-egg problem, it should be possible to specify just a
      # node group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
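
  # Adoption is all-or-nothing per instance: a hypothetical disk list like
  #   [{"size": 1024, "adopt": "xenvg/vol1"}, {"size": 1024}]
  # mixes adopted and new disks and is rejected by the check above.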

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
        [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
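
  # On success, ial.result holds the chosen node names (one for a plain
  # instance, two for mirrored templates, matching req.RequiredNodes()
  # above), which are then translated back to UUIDs.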

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
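
  # With a hypothetical cluster storage dir of "/srv/ganeti/file-storage",
  # an opcode file_storage_dir of "mysubdir" and instance "inst1", the
  # resulting path would be "/srv/ganeti/file-storage/mysubdir/inst1".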

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name
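
  # Operational sketch: the disks are started so that the OS rename script
  # can run on the primary node; if the script fails, the instance keeps
  # its new name in the configuration (only a warning is emitted), and the
  # disks are shut down again in the finally clause above.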


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1629 class LUInstanceMove(LogicalUnit):
1630 """Move an instance by data-copying.
1633 HPATH = "instance-move"
1634 HTYPE = constants.HTYPE_INSTANCE
1637 def ExpandNames(self):
1638 self._ExpandAndLockInstance()
1639 (self.op.target_node_uuid, self.op.target_node) = \
1640 ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1641 self.op.target_node)
1642 self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
1643 self.needed_locks[locking.LEVEL_NODE_RES] = []
1644 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1646 def DeclareLocks(self, level):
1647 if level == locking.LEVEL_NODE:
1648 self._LockInstancesNodes(primary_only=True)
1649 elif level == locking.LEVEL_NODE_RES:
1651 self.needed_locks[locking.LEVEL_NODE_RES] = \
1652 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1654 def BuildHooksEnv(self):
1657 This runs on master, primary and secondary nodes of the instance.
1661 "TARGET_NODE": self.op.target_node,
1662 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1664 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1667 def BuildHooksNodes(self):
1668 """Build hooks nodes.
1672 self.cfg.GetMasterNode(),
1673 self.instance.primary_node,
1674 self.op.target_node_uuid,
1678 def CheckPrereq(self):
1679 """Check prerequisites.
1681 This checks that the instance is in the cluster.
1684 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1685 assert self.instance is not None, \
1686 "Cannot retrieve locked instance %s" % self.op.instance_name
1688 if self.instance.disk_template not in constants.DTS_COPYABLE:
1689 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1690 self.instance.disk_template,
1693 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1694 assert target_node is not None, \
1695 "Cannot retrieve locked node %s" % self.op.target_node
1697 self.target_node_uuid = target_node.uuid
1698 if target_node.uuid == self.instance.primary_node:
1699 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1700 (self.instance.name, target_node.name),
1703 bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1705 for idx, dsk in enumerate(self.instance.disks):
1706 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1707 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1708 " cannot copy" % idx, errors.ECODE_STATE)
1710 CheckNodeOnline(self, target_node.uuid)
1711 CheckNodeNotDrained(self, target_node.uuid)
1712 CheckNodeVmCapable(self, target_node.uuid)
1713 cluster = self.cfg.GetClusterInfo()
1714 group_info = self.cfg.GetNodeGroup(target_node.group)
1715 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1716 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1717 ignore=self.op.ignore_ipolicy)
1719 if self.instance.admin_state == constants.ADMINST_UP:
1720 # check memory requirements on the secondary node
1721 CheckNodeFreeMemory(
1722 self, target_node.uuid, "failing over instance %s" %
1723 self.instance.name, bep[constants.BE_MAXMEM],
1724 self.instance.hypervisor,
1725 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1726 else:
1727 self.LogInfo("Not checking memory on the target node as"
1728 " instance will not be started")
1730 # check bridge existence
1731 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1733 def Exec(self, feedback_fn):
1734 """Move an instance.
1736 The move is done by shutting it down on its present node, copying
1737 the data over (slow) and starting it on the new node.
1740 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1741 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1743 self.LogInfo("Shutting down instance %s on source node %s",
1744 self.instance.name, source_node.name)
1746 assert (self.owned_locks(locking.LEVEL_NODE) ==
1747 self.owned_locks(locking.LEVEL_NODE_RES))
1749 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1750 self.op.shutdown_timeout,
1752 if self.op.ignore_consistency:
1753 result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1754 " anyway. Please make sure node %s is down. Error details" %
1755 (self.instance.name, source_node.name, source_node.name),
1758 result.Raise("Could not shutdown instance %s on node %s" %
1759 (self.instance.name, source_node.name))
1761 # create the target disks
1763 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1764 except errors.OpExecError:
1765 self.LogWarning("Device creation failed")
1766 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1769 cluster_name = self.cfg.GetClusterInfo().cluster_name
1772 # activate, get path, copy the data over
1773 for idx, disk in enumerate(self.instance.disks):
1774 self.LogInfo("Copying data for disk %d", idx)
1775 result = self.rpc.call_blockdev_assemble(
1776 target_node.uuid, (disk, self.instance), self.instance.name,
2778 if result.fail_msg:
2779 self.LogWarning("Can't assemble newly created disk %d: %s",
1780 idx, result.fail_msg)
2781 errs.append(result.fail_msg)
2782 continue
1783 dev_path = result.payload
1784 result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1786 target_node.name, dev_path,
1789 self.LogWarning("Can't copy data over for disk %d: %s",
1790 idx, result.fail_msg)
1791 errs.append(result.fail_msg)
1794 if errs:
1795 self.LogWarning("Some disks failed to copy, aborting")
1797 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1799 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1800 raise errors.OpExecError("Errors during disk copy: %s" %
1803 self.instance.primary_node = target_node.uuid
1804 self.cfg.Update(self.instance, feedback_fn)
1806 self.LogInfo("Removing the disks on the original node")
1807 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1809 # Only start the instance if it's marked as up
1810 if self.instance.admin_state == constants.ADMINST_UP:
1811 self.LogInfo("Starting instance %s on node %s",
1812 self.instance.name, target_node.name)
1814 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1815 ignore_secondaries=True)
1816 if not disks_ok:
1817 ShutdownInstanceDisks(self, self.instance)
1818 raise errors.OpExecError("Can't activate the instance's disks")
1820 result = self.rpc.call_instance_start(target_node.uuid,
1821 (self.instance, None, None), False,
1823 msg = result.fail_msg
1824 if msg:
1825 ShutdownInstanceDisks(self, self.instance)
1826 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1827 (self.instance.name, target_node.name, msg))
1830 class LUInstanceMultiAlloc(NoHooksLU):
1831 """Allocates multiple instances at the same time.
1836 def CheckArguments(self):
1841 for inst in self.op.instances:
1842 if inst.iallocator is not None:
1843 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1844 " instance objects", errors.ECODE_INVAL)
1845 nodes.append(bool(inst.pnode))
1846 if inst.disk_template in constants.DTS_INT_MIRROR:
1847 nodes.append(bool(inst.snode))
1849 has_nodes = compat.any(nodes)
1850 if compat.all(nodes) ^ has_nodes:
1851 raise errors.OpPrereqError("There are instance objects providing"
1852 " pnode/snode while others do not",
1855 if not has_nodes and self.op.iallocator is None:
1856 default_iallocator = self.cfg.GetDefaultIAllocator()
1857 if default_iallocator:
1858 self.op.iallocator = default_iallocator
1860 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1861 " given and no cluster-wide default"
1862 " iallocator found; please specify either"
1863 " an iallocator or nodes on the instances"
1864 " or set a cluster-wide default iallocator",
1867 _CheckOpportunisticLocking(self.op)
1869 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1870 if dups:
1871 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1872 utils.CommaJoin(dups), errors.ECODE_INVAL)
1874 def ExpandNames(self):
1875 """Calculate the locks.
1878 self.share_locks = ShareAll()
1879 self.needed_locks = {
1880 # iallocator will select nodes and even if no iallocator is used,
1881 # collisions with LUInstanceCreate should be avoided
1882 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1885 if self.op.iallocator:
1886 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1887 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1889 if self.op.opportunistic_locking:
1890 self.opportunistic_locks[locking.LEVEL_NODE] = True
1891 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1894 for inst in self.op.instances:
1895 (inst.pnode_uuid, inst.pnode) = \
1896 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1897 nodeslist.append(inst.pnode)
1898 if inst.snode is not None:
1899 (inst.snode_uuid, inst.snode) = \
1900 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1901 nodeslist.append(inst.snode)
1903 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1904 # Lock resources of instance's primary and secondary nodes (copy to
1905 # prevent accidental modification)
1906 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1908 def CheckPrereq(self):
1909 """Check prerequisites.
1912 if self.op.iallocator:
1913 cluster = self.cfg.GetClusterInfo()
1914 default_vg = self.cfg.GetVGName()
1915 ec_id = self.proc.GetECId()
1917 if self.op.opportunistic_locking:
1918 # Only consider nodes for which a lock is held
1919 node_whitelist = self.cfg.GetNodeNames(
1920 list(self.owned_locks(locking.LEVEL_NODE)))
1922 node_whitelist = None
1924 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1925 _ComputeNics(op, cluster, None,
1927 _ComputeFullBeParams(op, cluster),
1929 for op in self.op.instances]
1931 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1932 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1934 ial.Run(self.op.iallocator)
1936 if not ial.success:
1937 raise errors.OpPrereqError("Can't compute nodes using"
1938 " iallocator '%s': %s" %
1939 (self.op.iallocator, ial.info),
1942 self.ia_result = ial.result
1945 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1946 constants.JOB_IDS_KEY: [],
1949 def _ConstructPartialResult(self):
1950 """Constructs the partial result.
1953 if self.op.iallocator:
1954 (allocatable, failed_insts) = self.ia_result
1955 allocatable_insts = map(compat.fst, allocatable)
1956 else:
1957 allocatable_insts = [op.instance_name for op in self.op.instances]
1958 failed_insts = []
1960 return {
1961 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1962 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1963 }
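# Example of the partial result shape (hypothetical instance names):
#   {opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1.example.com"],
#    opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst2.example.com"]}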
1965 def Exec(self, feedback_fn):
1966 """Executes the opcode.
1970 if self.op.iallocator:
1971 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1972 (allocatable, failed) = self.ia_result
1974 for (name, node_names) in allocatable:
1975 op = op2inst.pop(name)
1977 (op.pnode_uuid, op.pnode) = \
1978 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1979 if len(node_names) > 1:
1980 (op.snode_uuid, op.snode) = \
1981 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1985 missing = set(op2inst.keys()) - set(failed)
1986 assert not missing, \
1987 "Iallocator returned an incomplete result: %s" % \
1988 utils.CommaJoin(missing)
1990 jobs.extend([op] for op in self.op.instances)
1992 return ResultWithJobs(jobs, **self._ConstructPartialResult())
1995 class _InstNicModPrivate:
1996 """Data structure for network interface modifications.
1998 Used by L{LUInstanceSetParams}.
2006 def _PrepareContainerMods(mods, private_fn):
2007 """Prepares a list of container modifications by adding a private data field.
2009 @type mods: list of tuples; (operation, index, parameters)
2010 @param mods: List of modifications
2011 @type private_fn: callable or None
2012 @param private_fn: Callable for constructing a private data field for a
2017 if private_fn is None:
2018 fn = lambda: None
2019 else:
2020 fn = private_fn
2022 return [(op, idx, params, fn()) for (op, idx, params) in mods]
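# Example (hypothetical modification list): with private_fn=None every
# modification simply gains None as its private data field:
#   mods = [(constants.DDM_ADD, -1, {"size": 1024}),
#           (constants.DDM_REMOVE, 0, {})]
#   _PrepareContainerMods(mods, None)
#   # => [(constants.DDM_ADD, -1, {"size": 1024}, None),
#   #     (constants.DDM_REMOVE, 0, {}, None)]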
2025 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2026 """Checks if nodes have enough physical CPUs
2028 This function checks if all given nodes have the needed number of
2029 physical CPUs. In case any node has fewer CPUs, or we cannot get the
2030 information from the node, this function raises an OpPrereqError
2031 exception.
2033 @type lu: C{LogicalUnit}
2034 @param lu: a logical unit from which we get configuration data
2035 @type node_uuids: C{list}
2036 @param node_uuids: the list of node UUIDs to check
2037 @type requested: C{int}
2038 @param requested: the minimum acceptable number of physical CPUs
2039 @type hypervisor_specs: list of pairs (string, dict of strings)
2040 @param hypervisor_specs: list of hypervisor specifications in
2041 pairs (hypervisor_name, hvparams)
2042 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2043 or we cannot check the node
2046 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2047 for node_uuid in node_uuids:
2048 info = nodeinfo[node_uuid]
2049 node_name = lu.cfg.GetNodeName(node_uuid)
2050 info.Raise("Cannot get current information from node %s" % node_name,
2051 prereq=True, ecode=errors.ECODE_ENVIRON)
2052 (_, _, (hv_info, )) = info.payload
2053 num_cpus = hv_info.get("cpu_total", None)
2054 if not isinstance(num_cpus, int):
2055 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2056 " on node %s, result was '%s'" %
2057 (node_name, num_cpus), errors.ECODE_ENVIRON)
2058 if requested > num_cpus:
2059 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2060 "required" % (node_name, num_cpus, requested),
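# Usage sketch (mirrors the call in LUInstanceSetParams.CheckPrereq below;
# the requested count of 4 is a hypothetical value):
#   hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
#   _CheckNodesPhysicalCPUs(lu, instance.all_nodes, 4, hvspecs)
#   # raises OpPrereqError unless every node reports >= 4 physical CPUs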
2064 def GetItemFromContainer(identifier, kind, container):
2065 """Return the item referred to by the identifier.
2067 @type identifier: string
2068 @param identifier: Item index or name or UUID
2070 @param kind: One-word item description
2071 @type container: list
2072 @param container: Container to get the item from
2076 try:
2077 idx = int(identifier)
2078 if idx == -1:
2080 absidx = len(container) - 1
2081 elif idx < 0:
2082 raise IndexError("Not accepting negative indices other than -1")
2083 elif idx > len(container):
2084 raise IndexError("Got %s index %s, but there are only %s" %
2085 (kind, idx, len(container)))
2086 else:
2087 absidx = idx
2088 return (absidx, container[idx])
2089 except ValueError:
2090 pass
2092 for idx, item in enumerate(container):
2093 if item.uuid == identifier or item.name == identifier:
2094 return (idx, item)
2096 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2097 (kind, identifier), errors.ECODE_NOENT)
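# Examples (assuming a hypothetical list of objects.Disk items named disks):
#   GetItemFromContainer("0", "disk", disks)     # by index
#   GetItemFromContainer("-1", "disk", disks)    # last item
#   GetItemFromContainer("data", "disk", disks)  # by name or UUID
# Each call returns (absolute_index, item) or raises an exception as above.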
2100 def _ApplyContainerMods(kind, container, chgdesc, mods,
2101 create_fn, modify_fn, remove_fn):
2102 """Applies descriptions in C{mods} to C{container}.
2105 @param kind: One-word item description
2106 @type container: list
2107 @param container: Container to modify
2108 @type chgdesc: None or list
2109 @param chgdesc: List of applied changes
2111 @param mods: Modifications as returned by L{_PrepareContainerMods}
2112 @type create_fn: callable
2113 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2114 receives absolute item index, parameters and private data object as added
2115 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2117 @type modify_fn: callable
2118 @param modify_fn: Callback for modifying an existing item
2119 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2120 and private data object as added by L{_PrepareContainerMods}, returns
2122 @type remove_fn: callable
2123 @param remove_fn: Callback on removing item; receives absolute item index,
2124 item and private data object as added by L{_PrepareContainerMods}
2127 for (op, identifier, params, private) in mods:
2128 changes = None
2130 if op == constants.DDM_ADD:
2131 # Calculate where item will be added
2132 # When adding an item, identifier can only be an index
2133 try:
2134 idx = int(identifier)
2135 except ValueError:
2136 raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
2137 " as identifier for %s" % constants.DDM_ADD,
2138 errors.ECODE_INVAL)
2139 if idx == -1:
2140 addidx = len(container)
2141 else:
2142 if idx < 0:
2143 raise IndexError("Not accepting negative indices other than -1")
2144 elif idx > len(container):
2145 raise IndexError("Got %s index %s, but there are only %s" %
2146 (kind, idx, len(container)))
2147 addidx = idx
2149 if create_fn is None:
2150 item = params
2151 else:
2152 (item, changes) = create_fn(addidx, params, private)
2154 if idx == len(container):
2155 container.append(item)
2156 else:
2158 assert idx <= len(container)
2159 # list.insert does so before the specified index
2160 container.insert(idx, item)
2162 # Retrieve existing item
2163 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2165 if op == constants.DDM_REMOVE:
2168 if remove_fn is not None:
2169 remove_fn(absidx, item, private)
2171 changes = [("%s/%s" % (kind, absidx), "remove")]
2173 assert container[absidx] == item
2174 del container[absidx]
2175 elif op == constants.DDM_MODIFY:
2176 if modify_fn is not None:
2177 changes = modify_fn(absidx, item, params, private)
2179 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2181 assert _TApplyContModsCbChanges(changes)
2183 if not (chgdesc is None or changes is None):
2184 chgdesc.extend(changes)
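# Example (hypothetical removal of the last NIC in a list named nics):
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, -1, {})], None)
#   _ApplyContainerMods("NIC", nics, chgdesc, mods, None, None, None)
#   # nics loses its last element; chgdesc == [("NIC/<last_idx>", "remove")]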
2187 def _UpdateIvNames(base_index, disks):
2188 """Updates the C{iv_name} attribute of disks.
2190 @type disks: list of L{objects.Disk}
2193 for (idx, disk) in enumerate(disks):
2194 disk.iv_name = "disk/%s" % (base_index + idx, )
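# Example: _UpdateIvNames(0, disks) sets the iv_name fields to "disk/0",
# "disk/1", ..., while _UpdateIvNames(2, disks) would start at "disk/2".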
2197 class LUInstanceSetParams(LogicalUnit):
2198 """Modifies an instance's parameters.
2201 HPATH = "instance-modify"
2202 HTYPE = constants.HTYPE_INSTANCE
2206 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2207 assert ht.TList(mods)
2208 assert not mods or len(mods[0]) in (2, 3)
2210 if mods and len(mods[0]) == 2:
2214 for op, params in mods:
2215 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2216 result.append((op, -1, params))
2220 raise errors.OpPrereqError("Only one %s add or remove operation is"
2221 " supported at a time" % kind,
2224 result.append((constants.DDM_MODIFY, op, params))
2226 assert verify_fn(result)
2227 else:
2228 result = mods
2230 return result
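# Example (hypothetical legacy input): the old two-element format
#   [(constants.DDM_ADD, {"size": 1024}), ("0", {"mode": "ro"})]
# is upgraded to the current three-element format
#   [(constants.DDM_ADD, -1, {"size": 1024}),
#    (constants.DDM_MODIFY, "0", {"mode": "ro"})]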
2233 def _CheckMods(kind, mods, key_types, item_fn):
2234 """Ensures requested disk/NIC modifications are valid.
2237 for (op, _, params) in mods:
2238 assert ht.TDict(params)
2240 # If 'key_types' is an empty dict, we assume we have an
2241 # 'ext' template and thus do not ForceDictType
2243 utils.ForceDictType(params, key_types)
2245 if op == constants.DDM_REMOVE:
2247 raise errors.OpPrereqError("No settings should be passed when"
2248 " removing a %s" % kind,
2250 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2253 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2256 def _VerifyDiskModification(op, params, excl_stor):
2257 """Verifies a disk modification.
2260 if op == constants.DDM_ADD:
2261 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2262 if mode not in constants.DISK_ACCESS_SET:
2263 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2266 size = params.get(constants.IDISK_SIZE, None)
2268 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2269 constants.IDISK_SIZE, errors.ECODE_INVAL)
2273 except (TypeError, ValueError), err:
2274 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2277 params[constants.IDISK_SIZE] = size
2278 name = params.get(constants.IDISK_NAME, None)
2279 if name is not None and name.lower() == constants.VALUE_NONE:
2280 params[constants.IDISK_NAME] = None
2282 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2284 elif op == constants.DDM_MODIFY:
2285 if constants.IDISK_SIZE in params:
2286 raise errors.OpPrereqError("Disk size change not possible, use"
2287 " grow-disk", errors.ECODE_INVAL)
2289 raise errors.OpPrereqError("Disk modification doesn't support"
2290 " additional arbitrary parameters",
2292 name = params.get(constants.IDISK_NAME, None)
2293 if name is not None and name.lower() == constants.VALUE_NONE:
2294 params[constants.IDISK_NAME] = None
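# Example (hypothetical parameters): a minimal valid disk addition only
# needs a size; the access mode defaults to constants.DISK_RDWR:
#   params = {constants.IDISK_SIZE: 1024}
#   LUInstanceSetParams._VerifyDiskModification(constants.DDM_ADD, params,
#                                               False)
#   # params is normalized in place to {"size": 1024, "mode": "rw"}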
2297 def _VerifyNicModification(op, params):
2298 """Verifies a network interface modification.
2301 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2302 ip = params.get(constants.INIC_IP, None)
2303 name = params.get(constants.INIC_NAME, None)
2304 req_net = params.get(constants.INIC_NETWORK, None)
2305 link = params.get(constants.NIC_LINK, None)
2306 mode = params.get(constants.NIC_MODE, None)
2307 if name is not None and name.lower() == constants.VALUE_NONE:
2308 params[constants.INIC_NAME] = None
2309 if req_net is not None:
2310 if req_net.lower() == constants.VALUE_NONE:
2311 params[constants.INIC_NETWORK] = None
2313 elif link is not None or mode is not None:
2314 raise errors.OpPrereqError("If network is given, mode or link"
2315 " should not be set",
2316 errors.ECODE_INVAL)
2318 if op == constants.DDM_ADD:
2319 macaddr = params.get(constants.INIC_MAC, None)
2321 params[constants.INIC_MAC] = constants.VALUE_AUTO
2324 if ip.lower() == constants.VALUE_NONE:
2325 params[constants.INIC_IP] = None
2327 if ip.lower() == constants.NIC_IP_POOL:
2328 if op == constants.DDM_ADD and req_net is None:
2329 raise errors.OpPrereqError("If ip=pool, parameter network"
2333 if not netutils.IPAddress.IsValid(ip):
2334 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2337 if constants.INIC_MAC in params:
2338 macaddr = params[constants.INIC_MAC]
2339 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2340 macaddr = utils.NormalizeAndValidateMac(macaddr)
2342 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2343 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2344 " modifying an existing NIC",
2345 errors.ECODE_INVAL)
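# Examples (hypothetical parameter dicts):
#   _VerifyNicModification(constants.DDM_ADD, {constants.INIC_IP: "pool"})
#   # fails: ip=pool requires a network to be given as well
#   _VerifyNicModification(constants.DDM_ADD,
#                          {constants.INIC_NETWORK: "net1",
#                           constants.INIC_IP: "pool"})
#   # passes: the IP is later allocated from the pool of network "net1"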
2347 def CheckArguments(self):
2348 if not (self.op.nics or self.op.disks or self.op.disk_template or
2349 self.op.hvparams or self.op.beparams or self.op.os_name or
2350 self.op.offline is not None or self.op.runtime_mem or
2352 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2354 if self.op.hvparams:
2355 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2356 "hypervisor", "instance", "cluster")
2358 self.op.disks = self._UpgradeDiskNicMods(
2359 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2360 self.op.nics = self._UpgradeDiskNicMods(
2361 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2363 if self.op.disks and self.op.disk_template is not None:
2364 raise errors.OpPrereqError("Disk template conversion and other disk"
2365 " changes not supported at the same time",
2368 if (self.op.disk_template and
2369 self.op.disk_template in constants.DTS_INT_MIRROR and
2370 self.op.remote_node is None):
2371 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2372 " one requires specifying a secondary node",
2375 # Check NIC modifications
2376 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2377 self._VerifyNicModification)
2380 (self.op.pnode_uuid, self.op.pnode) = \
2381 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2383 def ExpandNames(self):
2384 self._ExpandAndLockInstance()
2385 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2386 # Can't even acquire node locks in shared mode as upcoming changes in
2387 # Ganeti 2.6 will start to modify the node object on disk conversion
2388 self.needed_locks[locking.LEVEL_NODE] = []
2389 self.needed_locks[locking.LEVEL_NODE_RES] = []
2390 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2391 # Lock the node group to be able to look up the ipolicy
2392 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2394 def DeclareLocks(self, level):
2395 if level == locking.LEVEL_NODEGROUP:
2396 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2397 # Acquire locks for the instance's nodegroups optimistically. Needs
2398 # to be verified in CheckPrereq
2399 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2400 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2401 elif level == locking.LEVEL_NODE:
2402 self._LockInstancesNodes()
2403 if self.op.disk_template and self.op.remote_node:
2404 (self.op.remote_node_uuid, self.op.remote_node) = \
2405 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2406 self.op.remote_node)
2407 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2408 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2410 self.needed_locks[locking.LEVEL_NODE_RES] = \
2411 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2413 def BuildHooksEnv(self):
2416 This runs on the master, primary and secondaries.
2420 if constants.BE_MINMEM in self.be_new:
2421 args["minmem"] = self.be_new[constants.BE_MINMEM]
2422 if constants.BE_MAXMEM in self.be_new:
2423 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2424 if constants.BE_VCPUS in self.be_new:
2425 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2426 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2427 # information at all.
2429 if self._new_nics is not None:
2432 for nic in self._new_nics:
2433 n = copy.deepcopy(nic)
2434 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2435 n.nicparams = nicparams
2436 nics.append(NICToTuple(self, n))
2440 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2441 if self.op.disk_template:
2442 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2443 if self.op.runtime_mem:
2444 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2448 def BuildHooksNodes(self):
2449 """Build hooks nodes.
2452 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2455 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2456 old_params, cluster, pnode_uuid):
2458 update_params_dict = dict([(key, params[key])
2459 for key in constants.NICS_PARAMETERS
2462 req_link = update_params_dict.get(constants.NIC_LINK, None)
2463 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2466 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2467 if new_net_uuid_or_name:
2468 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2469 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2472 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2475 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2477 raise errors.OpPrereqError("No netparams found for the network"
2478 " %s, probably not connected" %
2479 new_net_obj.name, errors.ECODE_INVAL)
2480 new_params = dict(netparams)
2482 new_params = GetUpdatedParams(old_params, update_params_dict)
2484 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2486 new_filled_params = cluster.SimpleFillNIC(new_params)
2487 objects.NIC.CheckParameterSyntax(new_filled_params)
2489 new_mode = new_filled_params[constants.NIC_MODE]
2490 if new_mode == constants.NIC_MODE_BRIDGED:
2491 bridge = new_filled_params[constants.NIC_LINK]
2492 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2494 msg = "Error checking bridges on node '%s': %s" % \
2495 (self.cfg.GetNodeName(pnode_uuid), msg)
2497 self.warn.append(msg)
2499 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2501 elif new_mode == constants.NIC_MODE_ROUTED:
2502 ip = params.get(constants.INIC_IP, old_ip)
2504 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2505 " on a routed NIC", errors.ECODE_INVAL)
2507 elif new_mode == constants.NIC_MODE_OVS:
2508 # TODO: check OVS link
2509 self.LogInfo("OVS links are currently not checked for correctness")
2511 if constants.INIC_MAC in params:
2512 mac = params[constants.INIC_MAC]
2514 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2516 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2517 # otherwise generate the MAC address
2518 params[constants.INIC_MAC] = \
2519 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2521 # or validate/reserve the current one
2523 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2524 except errors.ReservationError:
2525 raise errors.OpPrereqError("MAC address '%s' already in use"
2526 " in cluster" % mac,
2527 errors.ECODE_NOTUNIQUE)
2528 elif new_net_uuid != old_net_uuid:
2530 def get_net_prefix(net_uuid):
2533 nobj = self.cfg.GetNetwork(net_uuid)
2534 mac_prefix = nobj.mac_prefix
2538 new_prefix = get_net_prefix(new_net_uuid)
2539 old_prefix = get_net_prefix(old_net_uuid)
2540 if old_prefix != new_prefix:
2541 params[constants.INIC_MAC] = \
2542 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2544 # if there is a change in (ip, network) tuple
2545 new_ip = params.get(constants.INIC_IP, old_ip)
2546 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2548 # if IP is pool then require a network and generate one IP
2549 if new_ip.lower() == constants.NIC_IP_POOL:
2552 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2553 except errors.ReservationError:
2554 raise errors.OpPrereqError("Unable to get a free IP"
2555 " from the address pool",
2557 self.LogInfo("Chose IP %s from network %s",
2560 params[constants.INIC_IP] = new_ip
2562 raise errors.OpPrereqError("ip=pool, but no network found",
2564 # Otherwise, reserve the new IP in the new network, if one is set
2567 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2568 self.LogInfo("Reserving IP %s in network %s",
2569 new_ip, new_net_obj.name)
2570 except errors.ReservationError:
2571 raise errors.OpPrereqError("IP %s not available in network %s" %
2572 (new_ip, new_net_obj.name),
2573 errors.ECODE_NOTUNIQUE)
2574 # new network is None so check if new IP is a conflicting IP
2575 elif self.op.conflicts_check:
2576 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2578 # release old IP if old network is not None
2579 if old_ip and old_net_uuid:
2581 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2582 except errors.AddressPoolError:
2583 logging.warning("Release IP %s not contained in network %s",
2584 old_ip, old_net_obj.name)
2586 # there are no changes in (ip, network) tuple and old network is not None
2587 elif (old_net_uuid is not None and
2588 (req_link is not None or req_mode is not None)):
2589 raise errors.OpPrereqError("Not allowed to change link or mode of"
2590 " a NIC that is connected to a network",
2593 private.params = new_params
2594 private.filled = new_filled_params
2596 def _PreCheckDiskTemplate(self, pnode_info):
2597 """CheckPrereq checks related to a new disk template."""
2598 # Arguments are passed to avoid configuration lookups
2599 pnode_uuid = self.instance.primary_node
2600 if self.instance.disk_template == self.op.disk_template:
2601 raise errors.OpPrereqError("Instance already has disk template %s" %
2602 self.instance.disk_template,
2605 if not self.cluster.IsDiskTemplateEnabled(self.instance.disk_template):
2606 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2607 " cluster." % self.instance.disk_template)
2609 if (self.instance.disk_template,
2610 self.op.disk_template) not in self._DISK_CONVERSIONS:
2611 raise errors.OpPrereqError("Unsupported disk template conversion from"
2612 " %s to %s" % (self.instance.disk_template,
2613 self.op.disk_template),
2615 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2616 msg="cannot change disk template")
2617 if self.op.disk_template in constants.DTS_INT_MIRROR:
2618 if self.op.remote_node_uuid == pnode_uuid:
2619 raise errors.OpPrereqError("Given new secondary node %s is the same"
2620 " as the primary node of the instance" %
2621 self.op.remote_node, errors.ECODE_STATE)
2622 CheckNodeOnline(self, self.op.remote_node_uuid)
2623 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2624 # FIXME: here we assume that the old instance type is DT_PLAIN
2625 assert self.instance.disk_template == constants.DT_PLAIN
2626 disks = [{constants.IDISK_SIZE: d.size,
2627 constants.IDISK_VG: d.logical_id[0]}
2628 for d in self.instance.disks]
2629 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2630 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2632 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2633 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2634 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2636 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2637 ignore=self.op.ignore_ipolicy)
2638 if pnode_info.group != snode_info.group:
2639 self.LogWarning("The primary and secondary nodes are in two"
2640 " different node groups; the disk parameters"
2641 " from the first disk's node group will be"
2644 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2645 # Make sure none of the nodes require exclusive storage
2646 nodes = [pnode_info]
2647 if self.op.disk_template in constants.DTS_INT_MIRROR:
2649 nodes.append(snode_info)
2650 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2651 if compat.any(map(has_es, nodes)):
2652 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2653 " storage is enabled" % (self.instance.disk_template,
2654 self.op.disk_template))
2655 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2657 def _PreCheckDisks(self, ispec):
2658 """CheckPrereq checks related to disk changes.
2661 @param ispec: instance specs to be updated with the new disks
2664 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2666 excl_stor = compat.any(
2667 rpc.GetExclusiveStorageForNodes(self.cfg,
2668 self.instance.all_nodes).values()
2671 # Check disk modifications. This is done here and not in CheckArguments
2672 # (as with NICs), because we need to know the instance's disk template
2673 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2674 if self.instance.disk_template == constants.DT_EXT:
2675 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2677 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2680 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2682 # Check the validity of the `provider' parameter
2683 if self.instance.disk_template == constants.DT_EXT:
2684 for mod in self.diskmod:
2685 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2686 if mod[0] == constants.DDM_ADD:
2687 if ext_provider is None:
2688 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2689 " '%s' missing, during disk add" %
2691 constants.IDISK_PROVIDER),
2693 elif mod[0] == constants.DDM_MODIFY:
2695 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2697 constants.IDISK_PROVIDER,
2700 for mod in self.diskmod:
2701 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2702 if ext_provider is not None:
2703 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2704 " instances of type '%s'" %
2705 (constants.IDISK_PROVIDER,
2709 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2710 raise errors.OpPrereqError("Disk operations not supported for"
2711 " diskless instances", errors.ECODE_INVAL)
2713 def _PrepareDiskMod(_, disk, params, __):
2714 disk.name = params.get(constants.IDISK_NAME, None)
2716 # Verify disk changes (operating on a copy)
2717 disks = copy.deepcopy(self.instance.disks)
2718 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2719 _PrepareDiskMod, None)
2720 utils.ValidateDeviceNames("disk", disks)
2721 if len(disks) > constants.MAX_DISKS:
2722 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2723 " more" % constants.MAX_DISKS,
2725 disk_sizes = [disk.size for disk in self.instance.disks]
2726 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2727 self.diskmod if op == constants.DDM_ADD)
2728 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2729 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2731 if self.op.offline is not None and self.op.offline:
2732 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2733 msg="can't change to offline")
2735 def CheckPrereq(self):
2736 """Check prerequisites.
2738 This checks the requested modifications against the current instance
2739 configuration.
2741 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2742 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2743 self.cluster = self.cfg.GetClusterInfo()
2745 assert self.instance is not None, \
2746 "Cannot retrieve locked instance %s" % self.op.instance_name
2748 pnode_uuid = self.instance.primary_node
2752 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2754 # verify that the instance is not up
2755 instance_info = self.rpc.call_instance_info(
2756 pnode_uuid, self.instance.name, self.instance.hypervisor,
2757 self.instance.hvparams)
2758 if instance_info.fail_msg:
2759 self.warn.append("Can't get instance runtime information: %s" %
2760 instance_info.fail_msg)
2761 elif instance_info.payload:
2762 raise errors.OpPrereqError("Instance is still running on %s" %
2763 self.cfg.GetNodeName(pnode_uuid),
2766 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2767 node_uuids = list(self.instance.all_nodes)
2768 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2770 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2771 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2772 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2774 # dictionary with instance information after the modification
2777 # Prepare NIC modifications
2778 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2781 if self.op.os_name and not self.op.force:
2782 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2783 self.op.force_variant)
2784 instance_os = self.op.os_name
2786 instance_os = self.instance.os
2788 assert not (self.op.disk_template and self.op.disks), \
2789 "Can't modify disk template and apply disk changes at the same time"
2791 if self.op.disk_template:
2792 self._PreCheckDiskTemplate(pnode_info)
2794 self._PreCheckDisks(ispec)
2796 # hvparams processing
2797 if self.op.hvparams:
2798 hv_type = self.instance.hypervisor
2799 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2800 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2801 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2804 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2805 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2806 self.hv_proposed = self.hv_new = hv_new # the new actual values
2807 self.hv_inst = i_hvdict # the new dict (without defaults)
2809 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2811 self.instance.hvparams)
2812 self.hv_new = self.hv_inst = {}
2814 # beparams processing
2815 if self.op.beparams:
2816 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2818 objects.UpgradeBeParams(i_bedict)
2819 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2820 be_new = self.cluster.SimpleFillBE(i_bedict)
2821 self.be_proposed = self.be_new = be_new # the new actual values
2822 self.be_inst = i_bedict # the new dict (without defaults)
2824 self.be_new = self.be_inst = {}
2825 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2826 be_old = self.cluster.FillBE(self.instance)
2828 # CPU param validation -- checking every time a parameter is
2829 # changed to cover all cases where either CPU mask or vcpus have
2831 if (constants.BE_VCPUS in self.be_proposed and
2832 constants.HV_CPU_MASK in self.hv_proposed):
2834 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2835 # Verify mask is consistent with number of vCPUs. Can skip this
2836 # test if only 1 entry in the CPU mask, which means same mask
2837 # is applied to all vCPUs.
2838 if (len(cpu_list) > 1 and
2839 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2840 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2842 (self.be_proposed[constants.BE_VCPUS],
2843 self.hv_proposed[constants.HV_CPU_MASK]),
2846 # Only perform this test if a new CPU mask is given
2847 if constants.HV_CPU_MASK in self.hv_new:
2848 # Calculate the largest CPU number requested
2849 max_requested_cpu = max(map(max, cpu_list))
2850 # Check that all of the instance's nodes have enough physical CPUs to
2851 # satisfy the requested CPU mask
2852 hvspecs = [(self.instance.hypervisor,
2853 self.cfg.GetClusterInfo()
2854 .hvparams[self.instance.hypervisor])]
2855 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2856 max_requested_cpu + 1,
2857 hvspecs)
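# Worked example (hypothetical mask; per-vCPU entries are separated by ":"
# in a multi-CPU mask): with BE_VCPUS=3 and HV_CPU_MASK="0:1:4" the parsed
# cpu_list is [[0], [1], [4]], which matches the vCPU count, and
# max_requested_cpu == 4, so every node must have at least 5 physical CPUs.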
2859 # osparams processing
2860 if self.op.osparams:
2861 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2862 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2863 self.os_inst = i_osdict # the new dict (without defaults)
2867 #TODO(dynmem): do the appropriate check involving MINMEM
2868 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2869 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2870 mem_check_list = [pnode_uuid]
2871 if be_new[constants.BE_AUTO_BALANCE]:
2872 # either we changed auto_balance to yes or it was from before
2873 mem_check_list.extend(self.instance.secondary_nodes)
2874 instance_info = self.rpc.call_instance_info(
2875 pnode_uuid, self.instance.name, self.instance.hypervisor,
2876 self.instance.hvparams)
2877 hvspecs = [(self.instance.hypervisor,
2878 self.cluster.hvparams[self.instance.hypervisor])]
2879 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2881 pninfo = nodeinfo[pnode_uuid]
2882 msg = pninfo.fail_msg
2884 # Assume the primary node is unreachable and go ahead
2885 self.warn.append("Can't get info from primary node %s: %s" %
2886 (self.cfg.GetNodeName(pnode_uuid), msg))
2888 (_, _, (pnhvinfo, )) = pninfo.payload
2889 if not isinstance(pnhvinfo.get("memory_free", None), int):
2890 self.warn.append("Node data from primary node %s doesn't contain"
2891 " free memory information" %
2892 self.cfg.GetNodeName(pnode_uuid))
2893 elif instance_info.fail_msg:
2894 self.warn.append("Can't get instance runtime information: %s" %
2895 instance_info.fail_msg)
2897 if instance_info.payload:
2898 current_mem = int(instance_info.payload["memory"])
2900 # Assume instance not running
2901 # (there is a slight race condition here, but it's not very
2902 # probable, and we have no other way to check)
2903 # TODO: Describe race condition
2905 #TODO(dynmem): do the appropriate check involving MINMEM
2906 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2907 pnhvinfo["memory_free"])
2909 raise errors.OpPrereqError("This change will prevent the instance"
2910 " from starting, due to %d MB of memory"
2911 " missing on its primary node" %
2912 miss_mem, errors.ECODE_NORES)
2914 if be_new[constants.BE_AUTO_BALANCE]:
2915 for node_uuid, nres in nodeinfo.items():
2916 if node_uuid not in self.instance.secondary_nodes:
2918 nres.Raise("Can't get info from secondary node %s" %
2919 self.cfg.GetNodeName(node_uuid), prereq=True,
2920 ecode=errors.ECODE_STATE)
2921 (_, _, (nhvinfo, )) = nres.payload
2922 if not isinstance(nhvinfo.get("memory_free", None), int):
2923 raise errors.OpPrereqError("Secondary node %s didn't return free"
2924 " memory information" %
2925 self.cfg.GetNodeName(node_uuid),
2927 #TODO(dynmem): do the appropriate check involving MINMEM
2928 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2929 raise errors.OpPrereqError("This change will prevent the instance"
2930 " from failover to its secondary node"
2931 " %s, due to not enough memory" %
2932 self.cfg.GetNodeName(node_uuid),
2935 if self.op.runtime_mem:
2936 remote_info = self.rpc.call_instance_info(
2937 self.instance.primary_node, self.instance.name,
2938 self.instance.hypervisor,
2939 self.cluster.hvparams[self.instance.hypervisor])
2940 remote_info.Raise("Error checking node %s" %
2941 self.cfg.GetNodeName(self.instance.primary_node))
2942 if not remote_info.payload: # not running already
2943 raise errors.OpPrereqError("Instance %s is not running" %
2944 self.instance.name, errors.ECODE_STATE)
2946 current_memory = remote_info.payload["memory"]
2947 if (not self.op.force and
2948 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2949 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2950 raise errors.OpPrereqError("Instance %s must have memory between %d"
2951 " and %d MB of memory unless --force is"
2953 (self.instance.name,
2954 self.be_proposed[constants.BE_MINMEM],
2955 self.be_proposed[constants.BE_MAXMEM]),
2958 delta = self.op.runtime_mem - current_memory
2959 if delta > 0:
2960 CheckNodeFreeMemory(
2961 self, self.instance.primary_node,
2962 "ballooning memory for instance %s" % self.instance.name, delta,
2963 self.instance.hypervisor,
2964 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
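# For example (hypothetical values): ballooning from 1024 MB to 2048 MB of
# runtime memory gives delta == 1024, so 1024 MB must be free on the primary
# node; shrinking gives a negative delta and skips the free-memory check.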
2966 # make self.cluster visible in the functions below
2967 cluster = self.cluster
2969 def _PrepareNicCreate(_, params, private):
2970 self._PrepareNicModification(params, private, None, None,
2971 {}, cluster, pnode_uuid)
2974 def _PrepareNicMod(_, nic, params, private):
2975 self._PrepareNicModification(params, private, nic.ip, nic.network,
2976 nic.nicparams, cluster, pnode_uuid)
2979 def _PrepareNicRemove(_, params, __):
2981 net = params.network
2982 if net is not None and ip is not None:
2983 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2985 # Verify NIC changes (operating on copy)
2986 nics = self.instance.nics[:]
2987 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2988 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2989 if len(nics) > constants.MAX_NICS:
2990 raise errors.OpPrereqError("Instance has too many network interfaces"
2991 " (%d), cannot add more" % constants.MAX_NICS,
2994 # Pre-compute NIC changes (necessary to use result in hooks)
2995 self._nic_chgdesc = []
2997 # Operate on copies as this is still in prereq
2998 nics = [nic.Copy() for nic in self.instance.nics]
2999 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3000 self._CreateNewNic, self._ApplyNicMods, None)
3001 # Verify that NIC names are unique and valid
3002 utils.ValidateDeviceNames("NIC", nics)
3003 self._new_nics = nics
3004 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3006 self._new_nics = None
3007 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3009 if not self.op.ignore_ipolicy:
3010 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3013 # Fill ispec with backend parameters
3014 ispec[constants.ISPEC_SPINDLE_USE] = \
3015 self.be_new.get(constants.BE_SPINDLE_USE, None)
3016 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3019 # Copy ispec to verify parameters with min/max values separately
3020 if self.op.disk_template:
3021 new_disk_template = self.op.disk_template
3023 new_disk_template = self.instance.disk_template
3024 ispec_max = ispec.copy()
3025 ispec_max[constants.ISPEC_MEM_SIZE] = \
3026 self.be_new.get(constants.BE_MAXMEM, None)
3027 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3029 ispec_min = ispec.copy()
3030 ispec_min[constants.ISPEC_MEM_SIZE] = \
3031 self.be_new.get(constants.BE_MINMEM, None)
3032 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3035 if (res_max or res_min):
3036 # FIXME: Improve error message by including information about whether
3037 # the upper or lower limit of the parameter fails the ipolicy.
3038 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3039 (group_info, group_info.name,
3040 utils.CommaJoin(set(res_max + res_min))))
3041 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3043 def _ConvertPlainToDrbd(self, feedback_fn):
3044 """Converts an instance from plain to drbd.
3047 feedback_fn("Converting template to drbd")
3048 pnode_uuid = self.instance.primary_node
3049 snode_uuid = self.op.remote_node_uuid
3051 assert self.instance.disk_template == constants.DT_PLAIN
3053 # create a fake disk info for _GenerateDiskTemplate
3054 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3055 constants.IDISK_VG: d.logical_id[0],
3056 constants.IDISK_NAME: d.name}
3057 for d in self.instance.disks]
3058 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3059 self.instance.uuid, pnode_uuid,
3060 [snode_uuid], disk_info, None, None, 0,
3061 feedback_fn, self.diskparams)
3062 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3064 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3065 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3066 info = GetInstanceInfoText(self.instance)
3067 feedback_fn("Creating additional volumes...")
3068 # first, create the missing data and meta devices
3069 for disk in anno_disks:
3070 # unfortunately this is... not too nice
3071 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3072 info, True, p_excl_stor)
3073 for child in disk.children:
3074 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3076 # at this stage, all new LVs have been created, we can rename the
3078 feedback_fn("Renaming original volumes...")
3079 rename_list = [(o, n.children[0].logical_id)
3080 for (o, n) in zip(self.instance.disks, new_disks)]
3081 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3082 result.Raise("Failed to rename original LVs")
3084 feedback_fn("Initializing DRBD devices...")
3085 # all child devices are in place, we can now create the DRBD devices
3087 for disk in anno_disks:
3088 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3089 (snode_uuid, s_excl_stor)]:
3090 f_create = node_uuid == pnode_uuid
3091 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3092 f_create, excl_stor)
3093 except errors.GenericError, e:
3094 feedback_fn("Initializing of DRBD devices failed;"
3095 " renaming back original volumes...")
3096 for disk in new_disks:
3097 self.cfg.SetDiskID(disk, pnode_uuid)
3098 rename_back_list = [(n.children[0], o.logical_id)
3099 for (n, o) in zip(new_disks, self.instance.disks)]
3100 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3101 result.Raise("Failed to rename LVs back after error %s" % str(e))
3104 # at this point, the instance has been modified
3105 self.instance.disk_template = constants.DT_DRBD8
3106 self.instance.disks = new_disks
3107 self.cfg.Update(self.instance, feedback_fn)
3109 # Release node locks while waiting for sync
3110 ReleaseLocks(self, locking.LEVEL_NODE)
3112 # disks are created, waiting for sync
3113 disk_abort = not WaitForSync(self, self.instance,
3114 oneshot=not self.op.wait_for_sync)
3116 raise errors.OpExecError("There are some degraded disks for"
3117 " this instance, please clean up manually")
3119 # Node resource locks will be released by caller
3121 def _ConvertDrbdToPlain(self, feedback_fn):
3122 """Converts an instance from drbd to plain.
3125 assert len(self.instance.secondary_nodes) == 1
3126 assert self.instance.disk_template == constants.DT_DRBD8
3128 pnode_uuid = self.instance.primary_node
3129 snode_uuid = self.instance.secondary_nodes[0]
3130 feedback_fn("Converting template to plain")
3132 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3133 new_disks = [d.children[0] for d in self.instance.disks]
3135 # copy over size, mode and name
3136 for parent, child in zip(old_disks, new_disks):
3137 child.size = parent.size
3138 child.mode = parent.mode
3139 child.name = parent.name
3141 # this is a DRBD disk, return its port to the pool
3142 # NOTE: this must be done right before the call to cfg.Update!
3143 for disk in old_disks:
3144 tcp_port = disk.logical_id[2]
3145 self.cfg.AddTcpUdpPort(tcp_port)
3147 # update instance structure
3148 self.instance.disks = new_disks
3149 self.instance.disk_template = constants.DT_PLAIN
3150 _UpdateIvNames(0, self.instance.disks)
3151 self.cfg.Update(self.instance, feedback_fn)
3153 # Release locks in case removing disks takes a while
3154 ReleaseLocks(self, locking.LEVEL_NODE)
3156 feedback_fn("Removing volumes on the secondary node...")
3157 for disk in old_disks:
3158 self.cfg.SetDiskID(disk, snode_uuid)
3159 msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
3161 self.LogWarning("Could not remove block device %s on node %s,"
3162 " continuing anyway: %s", disk.iv_name,
3163 self.cfg.GetNodeName(snode_uuid), msg)
3165 feedback_fn("Removing unneeded volumes on the primary node...")
3166 for idx, disk in enumerate(old_disks):
3167 meta = disk.children[1]
3168 self.cfg.SetDiskID(meta, pnode_uuid)
3169 msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
3171 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3172 " continuing anyway: %s", idx,
3173 self.cfg.GetNodeName(pnode_uuid), msg)
3175 def _CreateNewDisk(self, idx, params, _):
3176 """Creates a new disk.
3180 if self.instance.disk_template in constants.DTS_FILEBASED:
3181 (file_driver, file_path) = self.instance.disks[0].logical_id
3182 file_path = os.path.dirname(file_path)
3184 file_driver = file_path = None
3187 GenerateDiskTemplate(self, self.instance.disk_template,
3188 self.instance.uuid, self.instance.primary_node,
3189 self.instance.secondary_nodes, [params], file_path,
3190 file_driver, idx, self.Log, self.diskparams)[0]
3192 new_disks = CreateDisks(self, self.instance, disks=[disk])
3194 if self.cluster.prealloc_wipe_disks:
3196 WipeOrCleanupDisks(self, self.instance,
3197 disks=[(idx, disk, 0)],
3201 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3205 def _ModifyDisk(idx, disk, params, _):
3210 mode = params.get(constants.IDISK_MODE, None)
3213 changes.append(("disk.mode/%d" % idx, disk.mode))
3215 name = params.get(constants.IDISK_NAME, None)
3217 changes.append(("disk.name/%d" % idx, disk.name))
3221 def _RemoveDisk(self, idx, root, _):
3225 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3226 for node_uuid, disk in anno_disk.ComputeNodeTree(
3227 self.instance.primary_node):
3228 self.cfg.SetDiskID(disk, node_uuid)
3229 msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
3231 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3232 " continuing anyway", idx,
3233 self.cfg.GetNodeName(node_uuid), msg)
3235 # if this is a DRBD disk, return its port to the pool
3236 if root.dev_type in constants.LDS_DRBD:
3237 self.cfg.AddTcpUdpPort(root.logical_id[2])
3239 def _CreateNewNic(self, idx, params, private):
3240 """Creates data structure for a new network interface.
3243 mac = params[constants.INIC_MAC]
3244 ip = params.get(constants.INIC_IP, None)
3245 net = params.get(constants.INIC_NETWORK, None)
3246 name = params.get(constants.INIC_NAME, None)
3247 net_uuid = self.cfg.LookupNetwork(net)
3248 # TODO: can a NIC have no nicparams (i.e. is private.filled always set)?
3249 nicparams = private.filled
3250 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3251 nicparams=nicparams)
3252 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3256 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3257 (mac, ip, private.filled[constants.NIC_MODE],
3258 private.filled[constants.NIC_LINK],
3262 def _ApplyNicMods(self, idx, nic, params, private):
3263 """Modifies a network interface.
3268 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3270 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3271 setattr(nic, key, params[key])
3273 new_net = params.get(constants.INIC_NETWORK, nic.network)
3274 new_net_uuid = self.cfg.LookupNetwork(new_net)
3275 if new_net_uuid != nic.network:
3276 changes.append(("nic.network/%d" % idx, new_net))
3277 nic.network = new_net_uuid
3280 nic.nicparams = private.filled
3282 for (key, val) in nic.nicparams.items():
3283 changes.append(("nic.%s/%d" % (key, idx), val))
3287 def Exec(self, feedback_fn):
3288 """Modifies an instance.
3290 All parameters take effect only at the next restart of the instance.
3293 # Process here the warnings from CheckPrereq, as we don't have a
3294 # feedback_fn there.
3295 # TODO: Replace with self.LogWarning
3296 for warn in self.warn:
3297 feedback_fn("WARNING: %s" % warn)
3299 assert ((self.op.disk_template is None) ^
3300 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3301 "Not owning any node resource locks"
3303 result = []
3306 if self.op.pnode_uuid:
3307 self.instance.primary_node = self.op.pnode_uuid
3310 if self.op.runtime_mem:
3311 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3313 self.op.runtime_mem)
3314 rpcres.Raise("Cannot modify instance runtime memory")
3315 result.append(("runtime_memory", self.op.runtime_mem))
3317 # Apply disk changes
3318 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3319 self._CreateNewDisk, self._ModifyDisk,
3321 _UpdateIvNames(0, self.instance.disks)
3323 if self.op.disk_template:
3325 check_nodes = set(self.instance.all_nodes)
3326 if self.op.remote_node_uuid:
3327 check_nodes.add(self.op.remote_node_uuid)
3328 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3329 owned = self.owned_locks(level)
3330 assert not (check_nodes - owned), \
3331 ("Not owning the correct locks, owning %r, expected at least %r" %
3332 (owned, check_nodes))
3334 r_shut = ShutdownInstanceDisks(self, self.instance)
3336 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3337 " proceed with disk template conversion")
3338 mode = (self.instance.disk_template, self.op.disk_template)
3340 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3342 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3344 result.append(("disk_template", self.op.disk_template))
3346 assert self.instance.disk_template == self.op.disk_template, \
3347 ("Expected disk template '%s', found '%s'" %
3348 (self.op.disk_template, self.instance.disk_template))
3350 # Release node and resource locks if there are any (they might already have
3351 # been released during disk conversion)
3352 ReleaseLocks(self, locking.LEVEL_NODE)
3353 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3356 if self._new_nics is not None:
3357 self.instance.nics = self._new_nics
3358 result.extend(self._nic_chgdesc)
3361 if self.op.hvparams:
3362 self.instance.hvparams = self.hv_inst
3363 for key, val in self.op.hvparams.iteritems():
3364 result.append(("hv/%s" % key, val))
3367 if self.op.beparams:
3368 self.instance.beparams = self.be_inst
3369 for key, val in self.op.beparams.iteritems():
3370 result.append(("be/%s" % key, val))
3374 self.instance.os = self.op.os_name
3377 if self.op.osparams:
3378 self.instance.osparams = self.os_inst
3379 for key, val in self.op.osparams.iteritems():
3380 result.append(("os/%s" % key, val))
3382 if self.op.offline is None:
3385 elif self.op.offline:
3386 # Mark instance as offline
3387 self.cfg.MarkInstanceOffline(self.instance.uuid)
3388 result.append(("admin_state", constants.ADMINST_OFFLINE))
3390 # Mark instance as online, but stopped
3391 self.cfg.MarkInstanceDown(self.instance.uuid)
3392 result.append(("admin_state", constants.ADMINST_DOWN))
3394 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3396 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3397 self.owned_locks(locking.LEVEL_NODE)), \
3398 "All node locks should have been released by now"
3402 _DISK_CONVERSIONS = {
3403 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3404 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3405 }
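# Dispatch happens in Exec above:
#   mode = (self.instance.disk_template, self.op.disk_template)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)
# so only the plain <-> drbd conversions listed here are supported.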
3408 class LUInstanceChangeGroup(LogicalUnit):
3409 HPATH = "instance-change-group"
3410 HTYPE = constants.HTYPE_INSTANCE
3413 def ExpandNames(self):
3414 self.share_locks = ShareAll()
3416 self.needed_locks = {
3417 locking.LEVEL_NODEGROUP: [],
3418 locking.LEVEL_NODE: [],
3419 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3422 self._ExpandAndLockInstance()
3424 if self.op.target_groups:
3425 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3426 self.op.target_groups)
3428 self.req_target_uuids = None
3430 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3432 def DeclareLocks(self, level):
3433 if level == locking.LEVEL_NODEGROUP:
3434 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3436 if self.req_target_uuids:
3437 lock_groups = set(self.req_target_uuids)
3439 # Lock all groups used by instance optimistically; this requires going
3440 # via the node before it's locked, requiring verification later on
3441 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3442 lock_groups.update(instance_groups)
3444 # No target groups, need to lock all of them
3445 lock_groups = locking.ALL_SET
3447 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3449 elif level == locking.LEVEL_NODE:
3450 if self.req_target_uuids:
3451 # Lock all nodes used by instances
3452 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3453 self._LockInstancesNodes()
3455 # Lock all nodes in all potential target groups
3456 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3457 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3458 member_nodes = [node_uuid
3459 for group in lock_groups
3460 for node_uuid in self.cfg.GetNodeGroup(group).members]
3461 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3463 # Lock all nodes as all groups are potential targets
3464 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3466 def CheckPrereq(self):
3467 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3468 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3469 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3471 assert (self.req_target_uuids is None or
3472 owned_groups.issuperset(self.req_target_uuids))
3473 assert owned_instance_names == set([self.op.instance_name])
3475 # Get instance information
3476 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3478 # Check if node groups for locked instance are still correct
3479 assert owned_nodes.issuperset(self.instance.all_nodes), \
3480 ("Instance %s's nodes changed while we kept the lock" %
3481 self.op.instance_name)
3483 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3486 if self.req_target_uuids:
3487 # User requested specific target groups
3488 self.target_uuids = frozenset(self.req_target_uuids)
3490 # All groups except those used by the instance are potential targets
3491 self.target_uuids = owned_groups - inst_groups
3493 conflicting_groups = self.target_uuids & inst_groups
3494 if conflicting_groups:
3495 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3496 " used by the instance '%s'" %
3497 (utils.CommaJoin(conflicting_groups),
3498 self.op.instance_name),
3501 if not self.target_uuids:
3502 raise errors.OpPrereqError("There are no possible target groups",
3505 def BuildHooksEnv(self):
3509 assert self.target_uuids
3512 "TARGET_GROUPS": " ".join(self.target_uuids),
3515 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3519 def BuildHooksNodes(self):
3520 """Build hooks nodes.
3523 mn = self.cfg.GetMasterNode()
3526 def Exec(self, feedback_fn):
3527 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3529 assert instances == [self.op.instance_name], "Instance not locked"
3531 req = iallocator.IAReqGroupChange(instances=instances,
3532 target_groups=list(self.target_uuids))
3533 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3535 ial.Run(self.op.iallocator)
3537 if not ial.success:
3538 raise errors.OpPrereqError("Can't compute solution for changing group of"
3539 " instance '%s' using iallocator '%s': %s" %
3540 (self.op.instance_name, self.op.iallocator,
3541 ial.info), errors.ECODE_NORES)
3543 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3545 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3546 " instance '%s'", len(jobs), self.op.instance_name)
3548 return ResultWithJobs(jobs)