# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callback
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
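
# Illustrative example (hypothetical names, not part of the module): if
# "inst1" resolves to "inst1.example.com", the prefix check passes and the
# resolved hostname object is returned; if it resolves to
# "other.example.com", an OpPrereqError is raised instead:
#
#   hostname = _CheckHostnameSane(lu, "inst1")
#   hostname.name  # "inst1.example.com"
#   hostname.ip    # resolved address, used later for the IP ping check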


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
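
# Illustrative example (hypothetical values, not actual cluster defaults):
# with cluster defaults of {"maxmem": 512, "minmem": 512, "vcpus": 1, ...},
# any opcode value of constants.VALUE_AUTO is replaced by the default before
# SimpleFillBE merges in the remaining parameters:
#
#   op.beparams = {"maxmem": constants.VALUE_AUTO, "vcpus": 2}
#   be_full = _ComputeFullBeParams(op, cluster)
#   # be_full == {"maxmem": 512, "minmem": 512, "vcpus": 2, ...}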


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
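
# Illustrative example (hypothetical values): a single NIC request with an
# auto-generated MAC and an IP taken from the instance's resolved name would
# be passed in op.nics as
#
#   op.nics = [{constants.INIC_MODE: constants.VALUE_AUTO,
#               constants.INIC_IP: constants.VALUE_AUTO}]
#
# and _ComputeNics would return one objects.NIC with nicparams filled from
# the cluster defaults and nic.ip set to default_ip.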


def _CheckForConflictingIp(lu, ip, node):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
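
# Illustrative example (hypothetical spec): checking a 2-vCPU, 512 MB
# instance with one 1024 MB disk against a group ipolicy returns a (possibly
# empty) list of violation messages:
#
#   ispec = {constants.ISPEC_MEM_SIZE: 512,
#            constants.ISPEC_CPU_COUNT: 2,
#            constants.ISPEC_DISK_COUNT: 1,
#            constants.ISPEC_DISK_SIZE: [1024],
#            constants.ISPEC_NIC_COUNT: 1,
#            constants.ISPEC_SPINDLE_USE: 1}
#   res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, "plain")
#   # res == [] means the spec is within policy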


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
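
# Illustrative example: OS variants are encoded as "<os>+<variant>", so for
# an OS object declaring supported_variants == ["default", "testing"]
# (hypothetical values):
#
#   _CheckOSVariant(os_obj, "debootstrap+default")   # passes
#   _CheckOSVariant(os_obj, "debootstrap")           # "must include a variant"
#   _CheckOSVariant(os_obj, "debootstrap+unknown")   # "Unsupported OS variant"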


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template not in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration). We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
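
  # Illustrative sketch (not part of the LU): for a plain-disk creation with
  # an explicit primary node and no import, the lock structure computed above
  # ends up roughly as
  #
  #   self.needed_locks == {
  #     locking.LEVEL_NODE: ["node1.example.com"],      # hypothetical node
  #     locking.LEVEL_NODE_RES: ["node1.example.com"],
  #     locking.LEVEL_NODEGROUP: [<group uuid of node1>],
  #   }
  #
  # whereas with an iallocator both node levels are locking.ALL_SET.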

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
       self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    # TODO: Export network to iallocator so that it chooses a pnode
    #       in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]
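
  # Illustrative sketch: for a DRBD instance the allocator returns two node
  # names, mapped to primary and secondary respectively (hypothetical names):
  #
  #   ial.result == ["node1.example.com", "node2.example.com"]
  #   # => self.op.pnode == "node1.example.com"
  #   #    self.op.snode == "node2.example.com"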

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE], {})
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)

    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
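
  # Illustrative sketch of the export file layout read above (INI-style,
  # hypothetical values; the real files are produced by the backup/export
  # code):
  #
  #   [export]
  #   version = 0
  #   os = debootstrap+default
  #
  #   [instance]
  #   disk_template = plain
  #   disk0_size = 1024
  #   nic0_mac = aa:00:00:fa:3a:3f
  #   tags = web production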

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
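
  # Illustrative example (hypothetical paths): with a cluster file storage
  # dir of "/srv/ganeti/file-storage", file_storage_dir="web" and instance
  # name "inst1.example.com", the resulting directory is
  #
  #   /srv/ganeti/file-storage/web/inst1.example.com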

  def CheckPrereq(self):  # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN:  # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
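
    # Illustrative example (hypothetical device): adopting an existing block
    # device is requested as a disk dict such as
    #
    #   {constants.IDISK_ADOPT: "/dev/disk/instance1"}
    #
    # The path must live under constants.ADOPTABLE_BLOCKDEV_ROOT, and the
    # disk size is then overwritten with the size reported by the node.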

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    for tag in self.op.tags:
      iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")

    feedback_fn("* creating instance disks...")
    try:
      CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance)
      raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)
1698 def CheckPrereq(self):
1699 """Check prerequisites.
1701 This checks that the instance is in the cluster.
1704 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1705 assert self.instance is not None, \
1706 "Cannot retrieve locked instance %s" % self.op.instance_name
1708 if instance.disk_template not in constants.DTS_COPYABLE:
1709 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1710 instance.disk_template, errors.ECODE_STATE)
1712 node = self.cfg.GetNodeInfo(self.op.target_node)
1713 assert node is not None, \
1714 "Cannot retrieve locked node %s" % self.op.target_node
1716 self.target_node = target_node = node.name
1718 if target_node == instance.primary_node:
1719 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1720 (instance.name, target_node),
1723 bep = self.cfg.GetClusterInfo().FillBE(instance)
1725 for idx, dsk in enumerate(instance.disks):
1726 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1727 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1728 " cannot copy" % idx, errors.ECODE_STATE)
1730 CheckNodeOnline(self, target_node)
1731 CheckNodeNotDrained(self, target_node)
1732 CheckNodeVmCapable(self, target_node)
1733 cluster = self.cfg.GetClusterInfo()
1734 group_info = self.cfg.GetNodeGroup(node.group)
1735 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1736 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1737 ignore=self.op.ignore_ipolicy)
1739 if instance.admin_state == constants.ADMINST_UP:
1740 # check memory requirements on the target node
1741 CheckNodeFreeMemory(self, target_node,
1742 "moving instance %s" %
1743 instance.name, bep[constants.BE_MAXMEM],
1744 instance.hypervisor)
1745 else:
1746 self.LogInfo("Not checking memory on the target node as"
1747 " instance will not be started")
1749 # check bridge existence
1750 CheckInstanceBridgesExist(self, instance, node=target_node)
1752 def Exec(self, feedback_fn):
1753 """Move an instance.
1755 The move is done by shutting it down on its present node, copying
1756 the data over (slow) and starting it on the new node.
1758 """
1759 instance = self.instance
1761 source_node = instance.primary_node
1762 target_node = self.target_node
1764 self.LogInfo("Shutting down instance %s on source node %s",
1765 instance.name, source_node)
1767 assert (self.owned_locks(locking.LEVEL_NODE) ==
1768 self.owned_locks(locking.LEVEL_NODE_RES))
1770 result = self.rpc.call_instance_shutdown(source_node, instance,
1771 self.op.shutdown_timeout,
1772 self.op.reason)
1773 msg = result.fail_msg
1774 if msg:
1775 if self.op.ignore_consistency:
1776 self.LogWarning("Could not shutdown instance %s on node %s."
1777 " Proceeding anyway. Please make sure node"
1778 " %s is down. Error details: %s",
1779 instance.name, source_node, source_node, msg)
1780 else:
1781 raise errors.OpExecError("Could not shutdown instance %s on"
1782 " node %s: %s" %
1783 (instance.name, source_node, msg))
1785 # create the target disks
1786 try:
1787 CreateDisks(self, instance, target_node=target_node)
1788 except errors.OpExecError:
1789 self.LogWarning("Device creation failed")
1790 self.cfg.ReleaseDRBDMinors(instance.name)
1791 raise
1793 cluster_name = self.cfg.GetClusterInfo().cluster_name
1795 errs = []
1796 # activate, get path, copy the data over
1797 for idx, disk in enumerate(instance.disks):
1798 self.LogInfo("Copying data for disk %d", idx)
1799 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1800 instance.name, True, idx)
1801 if result.fail_msg:
1802 self.LogWarning("Can't assemble newly created disk %d: %s",
1803 idx, result.fail_msg)
1804 errs.append(result.fail_msg)
1805 break
1806 dev_path, _ = result.payload
1807 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1808 target_node, dev_path,
1809 cluster_name)
1810 if result.fail_msg:
1811 self.LogWarning("Can't copy data over for disk %d: %s",
1812 idx, result.fail_msg)
1813 errs.append(result.fail_msg)
1814 break
1816 if errs:
1817 self.LogWarning("Some disks failed to copy, aborting")
1818 try:
1819 RemoveDisks(self, instance, target_node=target_node)
1820 finally:
1821 self.cfg.ReleaseDRBDMinors(instance.name)
1822 raise errors.OpExecError("Errors during disk copy: %s" %
1823 ",".join(errs))
1825 instance.primary_node = target_node
1826 self.cfg.Update(instance, feedback_fn)
1828 self.LogInfo("Removing the disks on the original node")
1829 RemoveDisks(self, instance, target_node=source_node)
1831 # Only start the instance if it's marked as up
1832 if instance.admin_state == constants.ADMINST_UP:
1833 self.LogInfo("Starting instance %s on node %s",
1834 instance.name, target_node)
1836 disks_ok, _ = AssembleInstanceDisks(self, instance,
1837 ignore_secondaries=True)
1838 if not disks_ok:
1839 ShutdownInstanceDisks(self, instance)
1840 raise errors.OpExecError("Can't activate the instance's disks")
1842 result = self.rpc.call_instance_start(target_node,
1843 (instance, None, None), False,
1844 self.op.reason)
1845 msg = result.fail_msg
1846 if msg:
1847 ShutdownInstanceDisks(self, instance)
1848 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1849 (instance.name, target_node, msg))
1852 class LUInstanceMultiAlloc(NoHooksLU):
1853 """Allocates multiple instances at the same time.
1858 def CheckArguments(self):
1859 """Check arguments.
1861 """
1862 nodes = []
1863 for inst in self.op.instances:
1864 if inst.iallocator is not None:
1865 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1866 " instance objects", errors.ECODE_INVAL)
1867 nodes.append(bool(inst.pnode))
1868 if inst.disk_template in constants.DTS_INT_MIRROR:
1869 nodes.append(bool(inst.snode))
1871 has_nodes = compat.any(nodes)
1872 if compat.all(nodes) ^ has_nodes:
1873 raise errors.OpPrereqError("There are instance objects providing"
1874 " pnode/snode while others do not",
1877 if not has_nodes and self.op.iallocator is None:
1878 default_iallocator = self.cfg.GetDefaultIAllocator()
1879 if default_iallocator:
1880 self.op.iallocator = default_iallocator
1881 else:
1882 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1883 " given and no cluster-wide default"
1884 " iallocator found; please specify either"
1885 " an iallocator or nodes on the instances"
1886 " or set a cluster-wide default iallocator",
1889 _CheckOpportunisticLocking(self.op)
1891 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1893 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1894 utils.CommaJoin(dups), errors.ECODE_INVAL)
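# Editor's note (illustrative): the checks above enforce an all-or-nothing
# rule for node placement, e.g. a request whose first OpInstanceCreate names
# a pnode while a second one does not is rejected; either every instance
# names its nodes or an iallocator (given or cluster default) decides.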
1896 def ExpandNames(self):
1897 """Calculate the locks.
1900 self.share_locks = ShareAll()
1901 self.needed_locks = {
1902 # iallocator will select nodes and even if no iallocator is used,
1903 # collisions with LUInstanceCreate should be avoided
1904 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1905 }
1907 if self.op.iallocator:
1908 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1909 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1911 if self.op.opportunistic_locking:
1912 self.opportunistic_locks[locking.LEVEL_NODE] = True
1913 else:
1914 nodeslist = []
1915 for inst in self.op.instances:
1916 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1917 nodeslist.append(inst.pnode)
1918 if inst.snode is not None:
1919 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1920 nodeslist.append(inst.snode)
1922 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1923 # Lock resources of instance's primary and secondary nodes (copy to
1924 # prevent accidental modification)
1925 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1927 def DeclareLocks(self, level):
1928 if level == locking.LEVEL_NODE_RES and \
1929 self.opportunistic_locks[locking.LEVEL_NODE]:
1930 # Even when using opportunistic locking, we require the same set of
1931 # NODE_RES locks as we got NODE locks
1932 self.needed_locks[locking.LEVEL_NODE_RES] = \
1933 self.owned_locks(locking.LEVEL_NODE)
1935 def CheckPrereq(self):
1936 """Check prerequisite.
1939 if self.op.iallocator:
1940 cluster = self.cfg.GetClusterInfo()
1941 default_vg = self.cfg.GetVGName()
1942 ec_id = self.proc.GetECId()
1944 if self.op.opportunistic_locking:
1945 # Only consider nodes for which a lock is held
1946 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1947 else:
1948 node_whitelist = None
1950 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1951 _ComputeNics(op, cluster, None,
1952 self.cfg, ec_id),
1953 _ComputeFullBeParams(op, cluster),
1954 node_whitelist)
1955 for op in self.op.instances]
1957 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1958 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1960 ial.Run(self.op.iallocator)
1962 if not ial.success:
1963 raise errors.OpPrereqError("Can't compute nodes using"
1964 " iallocator '%s': %s" %
1965 (self.op.iallocator, ial.info),
1966 errors.ECODE_NORES)
1968 self.ia_result = ial.result
1970 if self.op.dry_run:
1971 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1972 constants.JOB_IDS_KEY: [],
1973 })
1975 def _ConstructPartialResult(self):
1976 """Constructs the partial result.
1978 """
1979 if self.op.iallocator:
1980 (allocatable, failed_insts) = self.ia_result
1981 allocatable_insts = map(compat.fst, allocatable)
1982 else:
1983 allocatable_insts = [op.instance_name for op in self.op.instances]
1984 failed_insts = []
1986 return {
1987 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1988 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1989 }
1991 def Exec(self, feedback_fn):
1992 """Executes the opcode.
1996 if self.op.iallocator:
1997 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1998 (allocatable, failed) = self.ia_result
2000 for (name, nodes) in allocatable:
2001 op = op2inst.pop(name)
2003 if len(nodes) > 1:
2004 (op.pnode, op.snode) = nodes
2005 else:
2006 (op.pnode,) = nodes
2008 jobs.append([op])
2010 missing = set(op2inst.keys()) - set(failed)
2011 assert not missing, \
2012 "Iallocator did return incomplete result: %s" % \
2013 utils.CommaJoin(missing)
2014 else:
2015 jobs.extend([op] for op in self.op.instances)
2017 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2020 class _InstNicModPrivate:
2021 """Data structure for network interface modifications.
2023 Used by L{LUInstanceSetParams}.
2025 """
2026 def __init__(self):
2027 self.params = None
2028 self.filled = None
2031 def _PrepareContainerMods(mods, private_fn):
2032 """Prepares a list of container modifications by adding a private data field.
2034 @type mods: list of tuples; (operation, index, parameters)
2035 @param mods: List of modifications
2036 @type private_fn: callable or None
2037 @param private_fn: Callable for constructing a private data field for a
2038 modification
2040 """
2042 if private_fn is None:
2043 fn = lambda: None
2044 else:
2045 fn = private_fn
2047 return [(op, idx, params, fn()) for (op, idx, params) in mods]
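# Editor's note (illustrative sketch of the shapes involved):
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
#   _PrepareContainerMods(mods, None)
#   -> [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)]
# With private_fn=_InstNicModPrivate, the fourth element is instead a fresh
# _InstNicModPrivate instance for the callbacks to fill in.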
2050 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2051 """Checks if nodes have enough physical CPUs
2053 This function checks if all given nodes have the needed number of
2054 physical CPUs. In case any node has less CPUs or we cannot get the
2055 information from the node, this function raises an OpPrereqError
2058 @type lu: C{LogicalUnit}
2059 @param lu: a logical unit from which we get configuration data
2060 @type nodenames: C{list}
2061 @param nodenames: the list of node names to check
2062 @type requested: C{int}
2063 @param requested: the minimum acceptable number of physical CPUs
2064 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2065 or we cannot check the node
2067 """
2068 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2069 for node in nodenames:
2070 info = nodeinfo[node]
2071 info.Raise("Cannot get current information from node %s" % node,
2072 prereq=True, ecode=errors.ECODE_ENVIRON)
2073 (_, _, (hv_info, )) = info.payload
2074 num_cpus = hv_info.get("cpu_total", None)
2075 if not isinstance(num_cpus, int):
2076 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2077 " on node %s, result was '%s'" %
2078 (node, num_cpus), errors.ECODE_ENVIRON)
2079 if requested > num_cpus:
2080 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2081 "required" % (node, num_cpus, requested),
2085 def GetItemFromContainer(identifier, kind, container):
2086 """Return the item refered by the identifier.
2088 @type identifier: string
2089 @param identifier: Item index or name or UUID
2091 @param kind: One-word item description
2092 @type container: list
2093 @param container: Container to get the item from
2098 idx = int(identifier)
2101 absidx = len(container) - 1
2103 raise IndexError("Not accepting negative indices other than -1")
2104 elif idx > len(container):
2105 raise IndexError("Got %s index %s, but there are only %s" %
2106 (kind, idx, len(container)))
2109 return (absidx, container[idx])
2113 for idx, item in enumerate(container):
2114 if item.uuid == identifier or item.name == identifier:
2117 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2118 (kind, identifier), errors.ECODE_NOENT)
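# Editor's note (illustrative, container contents hypothetical): for a
# three-element disk container,
#   GetItemFromContainer("1", "disk", disks)  -> (1, disks[1])
#   GetItemFromContainer("-1", "disk", disks) -> (2, disks[2])
# and a non-integer identifier is looked up against item.uuid and item.name,
# raising OpPrereqError (ECODE_NOENT) when nothing matches.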
2121 def _ApplyContainerMods(kind, container, chgdesc, mods,
2122 create_fn, modify_fn, remove_fn):
2123 """Applies descriptions in C{mods} to C{container}.
2126 @param kind: One-word item description
2127 @type container: list
2128 @param container: Container to modify
2129 @type chgdesc: None or list
2130 @param chgdesc: List of applied changes
2132 @param mods: Modifications as returned by L{_PrepareContainerMods}
2133 @type create_fn: callable
2134 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2135 receives absolute item index, parameters and private data object as added
2136 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2138 @type modify_fn: callable
2139 @param modify_fn: Callback for modifying an existing item
2140 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2141 and private data object as added by L{_PrepareContainerMods}, returns
2143 @type remove_fn: callable
2144 @param remove_fn: Callback on removing item; receives absolute item index,
2145 item and private data object as added by L{_PrepareContainerMods}
2147 """
2148 for (op, identifier, params, private) in mods:
2149 changes = None
2151 if op == constants.DDM_ADD:
2152 # Calculate where item will be added
2153 # When adding an item, identifier can only be an index
2154 try:
2155 idx = int(identifier)
2156 except ValueError:
2157 raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
2158 " as identifier for %s" % constants.DDM_ADD,
2159 errors.ECODE_INVAL)
2160 if idx == -1:
2161 addidx = len(container)
2162 else:
2163 if idx < 0:
2164 raise IndexError("Not accepting negative indices other than -1")
2165 elif idx > len(container):
2166 raise IndexError("Got %s index %s, but there are only %s" %
2167 (kind, idx, len(container)))
2168 addidx = idx
2170 if create_fn is None:
2171 item = params
2172 else:
2173 (item, changes) = create_fn(addidx, params, private)
2175 if idx == -1:
2176 container.append(item)
2178 else:
2179 assert idx <= len(container)
2180 # list.insert does so before the specified index
2181 container.insert(idx, item)
2182 else:
2183 # Retrieve existing item
2184 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2186 if op == constants.DDM_REMOVE:
2187 assert not params
2189 changes = [("%s/%s" % (kind, absidx), "remove")]
2191 if remove_fn is not None:
2192 msg = remove_fn(absidx, item, private)
2193 if msg:
2194 changes.append(("%s/%s" % (kind, absidx), msg))
2196 assert container[absidx] == item
2197 del container[absidx]
2198 elif op == constants.DDM_MODIFY:
2199 if modify_fn is not None:
2200 changes = modify_fn(absidx, item, params, private)
2201 else:
2202 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2204 assert _TApplyContModsCbChanges(changes)
2206 if not (chgdesc is None or changes is None):
2207 chgdesc.extend(changes)
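# Editor's note (illustrative): removing the first NIC while collecting
# change descriptions, with no callbacks registered:
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, 0, {})], None)
#   _ApplyContainerMods("NIC", nics, chgdesc, mods, None, None, None)
# afterwards chgdesc == [("NIC/0", "remove")] and nics lost its first item.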
2210 def _UpdateIvNames(base_index, disks):
2211 """Updates the C{iv_name} attribute of disks.
2213 @type disks: list of L{objects.Disk}
2214 @param disks: The disks whose C{iv_name} is updated
2215 """
2216 for (idx, disk) in enumerate(disks):
2217 disk.iv_name = "disk/%s" % (base_index + idx, )
2220 class LUInstanceSetParams(LogicalUnit):
2221 """Modifies an instances's parameters.
2224 HPATH = "instance-modify"
2225 HTYPE = constants.HTYPE_INSTANCE
2228 @staticmethod
2229 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2230 assert ht.TList(mods)
2231 assert not mods or len(mods[0]) in (2, 3)
2233 if mods and len(mods[0]) == 2:
2234 result = []
2235 addremove = 0
2237 for op, params in mods:
2238 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2239 result.append((op, -1, params))
2240 addremove += 1
2242 if addremove > 1:
2243 raise errors.OpPrereqError("Only one %s add or remove operation is"
2244 " supported at a time" % kind,
2245 errors.ECODE_INVAL)
2246 else:
2247 result.append((constants.DDM_MODIFY, op, params))
2249 assert verify_fn(result)
2250 else:
2251 result = mods
2253 return result
2255 @staticmethod
2256 def _CheckMods(kind, mods, key_types, item_fn):
2257 """Ensures requested disk/NIC modifications are valid.
2260 for (op, _, params) in mods:
2261 assert ht.TDict(params)
2263 # If 'key_types' is an empty dict, we assume we have an
2264 # 'ext' template and thus do not ForceDictType
2265 if key_types:
2266 utils.ForceDictType(params, key_types)
2268 if op == constants.DDM_REMOVE:
2269 if params:
2270 raise errors.OpPrereqError("No settings should be passed when"
2271 " removing a %s" % kind,
2272 errors.ECODE_INVAL)
2273 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2274 item_fn(op, params)
2275 else:
2276 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2278 def _VerifyDiskModification(self, op, params):
2279 """Verifies a disk modification.
2282 if op == constants.DDM_ADD:
2283 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2284 if mode not in constants.DISK_ACCESS_SET:
2285 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2288 size = params.get(constants.IDISK_SIZE, None)
2289 if size is None:
2290 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2291 constants.IDISK_SIZE, errors.ECODE_INVAL)
2293 try:
2294 size = int(size)
2295 except (TypeError, ValueError), err:
2296 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2297 errors.ECODE_INVAL)
2299 params[constants.IDISK_SIZE] = size
2300 name = params.get(constants.IDISK_NAME, None)
2301 if name is not None and name.lower() == constants.VALUE_NONE:
2302 params[constants.IDISK_NAME] = None
2304 elif op == constants.DDM_MODIFY:
2305 if constants.IDISK_SIZE in params:
2306 raise errors.OpPrereqError("Disk size change not possible, use"
2307 " grow-disk", errors.ECODE_INVAL)
2309 # Disk modification supports changing only the disk name and mode.
2310 # Changing arbitrary parameters is allowed only for the ext disk template
2311 if self.instance.disk_template != constants.DT_EXT:
2312 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2314 name = params.get(constants.IDISK_NAME, None)
2315 if name is not None and name.lower() == constants.VALUE_NONE:
2316 params[constants.IDISK_NAME] = None
2318 @staticmethod
2319 def _VerifyNicModification(op, params):
2320 """Verifies a network interface modification.
2322 """
2323 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2324 ip = params.get(constants.INIC_IP, None)
2325 name = params.get(constants.INIC_NAME, None)
2326 req_net = params.get(constants.INIC_NETWORK, None)
2327 link = params.get(constants.NIC_LINK, None)
2328 mode = params.get(constants.NIC_MODE, None)
2329 if name is not None and name.lower() == constants.VALUE_NONE:
2330 params[constants.INIC_NAME] = None
2331 if req_net is not None:
2332 if req_net.lower() == constants.VALUE_NONE:
2333 params[constants.INIC_NETWORK] = None
2335 elif link is not None or mode is not None:
2336 raise errors.OpPrereqError("If network is given"
2337 " mode or link should not",
2340 if op == constants.DDM_ADD:
2341 macaddr = params.get(constants.INIC_MAC, None)
2342 if macaddr is None:
2343 params[constants.INIC_MAC] = constants.VALUE_AUTO
2345 if ip is not None:
2346 if ip.lower() == constants.VALUE_NONE:
2347 params[constants.INIC_IP] = None
2348 else:
2349 if ip.lower() == constants.NIC_IP_POOL:
2350 if op == constants.DDM_ADD and req_net is None:
2351 raise errors.OpPrereqError("If ip=pool, parameter network"
2352 " cannot be none",
2353 errors.ECODE_INVAL)
2354 else:
2355 if not netutils.IPAddress.IsValid(ip):
2356 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2357 errors.ECODE_INVAL)
2359 if constants.INIC_MAC in params:
2360 macaddr = params[constants.INIC_MAC]
2361 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2362 macaddr = utils.NormalizeAndValidateMac(macaddr)
2364 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2365 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2366 " modifying an existing NIC",
2369 def CheckArguments(self):
2370 if not (self.op.nics or self.op.disks or self.op.disk_template or
2371 self.op.hvparams or self.op.beparams or self.op.os_name or
2372 self.op.osparams or self.op.offline is not None or
2373 self.op.runtime_mem or self.op.pnode):
2374 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2376 if self.op.hvparams:
2377 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2378 "hypervisor", "instance", "cluster")
2380 self.op.disks = self._UpgradeDiskNicMods(
2381 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2382 self.op.nics = self._UpgradeDiskNicMods(
2383 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2385 if self.op.disks and self.op.disk_template is not None:
2386 raise errors.OpPrereqError("Disk template conversion and other disk"
2387 " changes not supported at the same time",
2390 if (self.op.disk_template and
2391 self.op.disk_template in constants.DTS_INT_MIRROR and
2392 self.op.remote_node is None):
2393 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2394 " one requires specifying a secondary node",
2397 # Check NIC modifications
2398 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2399 self._VerifyNicModification)
2401 if self.op.pnode:
2402 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2404 def ExpandNames(self):
2405 self._ExpandAndLockInstance()
2406 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2407 # Can't even acquire node locks in shared mode as upcoming changes in
2408 # Ganeti 2.6 will start to modify the node object on disk conversion
2409 self.needed_locks[locking.LEVEL_NODE] = []
2410 self.needed_locks[locking.LEVEL_NODE_RES] = []
2411 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2412 # Lock the node group in shared mode to be able to look up the ipolicy
2413 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2415 def DeclareLocks(self, level):
2416 if level == locking.LEVEL_NODEGROUP:
2417 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2418 # Acquire locks for the instance's nodegroups optimistically. Needs
2419 # to be verified in CheckPrereq
2420 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2421 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2422 elif level == locking.LEVEL_NODE:
2423 self._LockInstancesNodes()
2424 if self.op.disk_template and self.op.remote_node:
2425 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2426 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2427 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2429 self.needed_locks[locking.LEVEL_NODE_RES] = \
2430 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2432 def BuildHooksEnv(self):
2433 """Build hooks env.
2435 This runs on the master, primary and secondaries.
2437 """
2438 args = {}
2439 if constants.BE_MINMEM in self.be_new:
2440 args["minmem"] = self.be_new[constants.BE_MINMEM]
2441 if constants.BE_MAXMEM in self.be_new:
2442 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2443 if constants.BE_VCPUS in self.be_new:
2444 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2445 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2446 # information at all.
2448 if self._new_nics is not None:
2449 nics = []
2451 for nic in self._new_nics:
2452 n = copy.deepcopy(nic)
2453 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2454 n.nicparams = nicparams
2455 nics.append(NICToTuple(self, n))
2457 args["nics"] = nics
2459 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2460 if self.op.disk_template:
2461 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2462 if self.op.runtime_mem:
2463 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2467 def BuildHooksNodes(self):
2468 """Build hooks nodes.
2471 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2474 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2475 old_params, cluster, pnode):
2477 update_params_dict = dict([(key, params[key])
2478 for key in constants.NICS_PARAMETERS
2479 if key in params])
2481 req_link = update_params_dict.get(constants.NIC_LINK, None)
2482 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2484 new_net_uuid = None
2485 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2486 if new_net_uuid_or_name:
2487 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2488 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2490 if old_net_uuid:
2491 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2493 if new_net_uuid:
2494 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2495 if not netparams:
2496 raise errors.OpPrereqError("No netparams found for the network"
2497 " %s, probably not connected" %
2498 new_net_obj.name, errors.ECODE_INVAL)
2499 new_params = dict(netparams)
2500 else:
2501 new_params = GetUpdatedParams(old_params, update_params_dict)
2503 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2505 new_filled_params = cluster.SimpleFillNIC(new_params)
2506 objects.NIC.CheckParameterSyntax(new_filled_params)
2508 new_mode = new_filled_params[constants.NIC_MODE]
2509 if new_mode == constants.NIC_MODE_BRIDGED:
2510 bridge = new_filled_params[constants.NIC_LINK]
2511 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2512 if msg:
2513 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2514 if self.op.force:
2515 self.warn.append(msg)
2516 else:
2517 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2519 elif new_mode == constants.NIC_MODE_ROUTED:
2520 ip = params.get(constants.INIC_IP, old_ip)
2522 elif new_mode == constants.NIC_MODE_OVS:
2523 # TODO: check OVS link
2524 self.LogInfo("OVS links are currently not checked for correctness")
2526 if constants.INIC_MAC in params:
2527 mac = params[constants.INIC_MAC]
2528 if mac is None:
2529 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2530 errors.ECODE_INVAL)
2531 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2532 # otherwise generate the MAC address
2533 params[constants.INIC_MAC] = \
2534 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2536 # or validate/reserve the current one
2537 try:
2538 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2539 except errors.ReservationError:
2540 raise errors.OpPrereqError("MAC address '%s' already in use"
2541 " in cluster" % mac,
2542 errors.ECODE_NOTUNIQUE)
2543 elif new_net_uuid != old_net_uuid:
2545 def get_net_prefix(net_uuid):
2546 mac_prefix = None
2547 if net_uuid:
2548 nobj = self.cfg.GetNetwork(net_uuid)
2549 mac_prefix = nobj.mac_prefix
2551 return mac_prefix
2553 new_prefix = get_net_prefix(new_net_uuid)
2554 old_prefix = get_net_prefix(old_net_uuid)
2555 if old_prefix != new_prefix:
2556 params[constants.INIC_MAC] = \
2557 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2559 # if there is a change in (ip, network) tuple
2560 new_ip = params.get(constants.INIC_IP, old_ip)
2561 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2562 if new_ip:
2563 # if IP is pool then require a network and generate one IP
2564 if new_ip.lower() == constants.NIC_IP_POOL:
2565 if new_net_uuid:
2566 try:
2567 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2568 except errors.ReservationError:
2569 raise errors.OpPrereqError("Unable to get a free IP"
2570 " from the address pool",
2572 self.LogInfo("Chose IP %s from network %s",
2575 params[constants.INIC_IP] = new_ip
2577 raise errors.OpPrereqError("ip=pool, but no network found",
2579 # Reserve the new IP in the new network, if any
2580 elif new_net_uuid:
2581 try:
2582 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2583 check=self.op.conflicts_check)
2584 self.LogInfo("Reserving IP %s in network %s",
2585 new_ip, new_net_obj.name)
2586 except errors.ReservationError:
2587 raise errors.OpPrereqError("IP %s not available in network %s" %
2588 (new_ip, new_net_obj.name),
2589 errors.ECODE_NOTUNIQUE)
2590 # new network is None so check if new IP is a conflicting IP
2591 elif self.op.conflicts_check:
2592 _CheckForConflictingIp(self, new_ip, pnode)
2594 # release old IP if old network is not None
2595 if old_ip and old_net_uuid:
2596 try:
2597 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2598 except errors.AddressPoolError:
2599 logging.warning("Release IP %s not contained in network %s",
2600 old_ip, old_net_obj.name)
2602 # there are no changes in (ip, network) tuple and old network is not None
2603 elif (old_net_uuid is not None and
2604 (req_link is not None or req_mode is not None)):
2605 raise errors.OpPrereqError("Not allowed to change link or mode of"
2606 " a NIC that is connected to a network",
2609 private.params = new_params
2610 private.filled = new_filled_params
2612 def _PreCheckDiskTemplate(self, pnode_info):
2613 """CheckPrereq checks related to a new disk template."""
2614 # Arguments are passed to avoid configuration lookups
2615 instance = self.instance
2616 pnode = instance.primary_node
2617 cluster = self.cluster
2618 if instance.disk_template == self.op.disk_template:
2619 raise errors.OpPrereqError("Instance already has disk template %s" %
2620 instance.disk_template, errors.ECODE_INVAL)
2622 if (instance.disk_template,
2623 self.op.disk_template) not in self._DISK_CONVERSIONS:
2624 raise errors.OpPrereqError("Unsupported disk template conversion from"
2625 " %s to %s" % (instance.disk_template,
2626 self.op.disk_template),
2628 CheckInstanceState(self, instance, INSTANCE_DOWN,
2629 msg="cannot change disk template")
2630 if self.op.disk_template in constants.DTS_INT_MIRROR:
2631 if self.op.remote_node == pnode:
2632 raise errors.OpPrereqError("Given new secondary node %s is the same"
2633 " as the primary node of the instance" %
2634 self.op.remote_node, errors.ECODE_STATE)
2635 CheckNodeOnline(self, self.op.remote_node)
2636 CheckNodeNotDrained(self, self.op.remote_node)
2637 # FIXME: here we assume that the old instance type is DT_PLAIN
2638 assert instance.disk_template == constants.DT_PLAIN
2639 disks = [{constants.IDISK_SIZE: d.size,
2640 constants.IDISK_VG: d.logical_id[0]}
2641 for d in instance.disks]
2642 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2643 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2645 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2646 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2647 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2648 snode_group)
2649 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2650 ignore=self.op.ignore_ipolicy)
2651 if pnode_info.group != snode_info.group:
2652 self.LogWarning("The primary and secondary nodes are in two"
2653 " different node groups; the disk parameters"
2654 " from the first disk's node group will be"
2657 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2658 # Make sure none of the nodes require exclusive storage
2659 nodes = [pnode_info]
2660 if self.op.disk_template in constants.DTS_INT_MIRROR:
2662 nodes.append(snode_info)
2663 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2664 if compat.any(map(has_es, nodes)):
2665 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2666 " storage is enabled" % (instance.disk_template,
2667 self.op.disk_template))
2668 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2670 # too many local variables
2671 # pylint: disable=R0914
2672 def CheckPrereq(self):
2673 """Check prerequisites.
2675 This only checks the instance list against the existing names.
2678 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2679 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2681 cluster = self.cluster = self.cfg.GetClusterInfo()
2682 assert self.instance is not None, \
2683 "Cannot retrieve locked instance %s" % self.op.instance_name
2685 pnode = instance.primary_node
2687 self.warn = []
2689 if (self.op.pnode is not None and self.op.pnode != pnode and
2690 not self.op.force):
2691 # verify that the instance is not up
2692 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2693 instance.hypervisor)
2694 if instance_info.fail_msg:
2695 self.warn.append("Can't get instance runtime information: %s" %
2696 instance_info.fail_msg)
2697 elif instance_info.payload:
2698 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2701 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2702 nodelist = list(instance.all_nodes)
2703 pnode_info = self.cfg.GetNodeInfo(pnode)
2704 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2706 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2707 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2708 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2710 # dictionary with instance information after the modification
2711 ispec = {}
2713 # Check disk modifications. This is done here and not in CheckArguments
2714 # (as with NICs), because we need to know the instance's disk template
2715 if instance.disk_template == constants.DT_EXT:
2716 self._CheckMods("disk", self.op.disks, {},
2717 self._VerifyDiskModification)
2718 else:
2719 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2720 self._VerifyDiskModification)
2722 # Prepare disk/NIC modifications
2723 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2724 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2726 # Check the validity of the `provider' parameter
2727 if instance.disk_template == constants.DT_EXT:
2728 for mod in self.diskmod:
2729 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2730 if mod[0] == constants.DDM_ADD:
2731 if ext_provider is None:
2732 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2733 " '%s' missing, during disk add" %
2735 constants.IDISK_PROVIDER),
2737 elif mod[0] == constants.DDM_MODIFY:
2739 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2741 constants.IDISK_PROVIDER,
2744 for mod in self.diskmod:
2745 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2746 if ext_provider is not None:
2747 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2748 " instances of type '%s'" %
2749 (constants.IDISK_PROVIDER,
2750 constants.DT_EXT),
2751 errors.ECODE_INVAL)
2753 if self.op.hotplug or self.op.hotplug_if_possible:
2754 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2755 self.instance)
2756 if result.fail_msg:
2757 if self.op.hotplug:
2758 result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2759 prereq=True, ecode=errors.ECODE_STATE)
2760 else:
2761 self.LogWarning(result.fail_msg)
2762 self.op.hotplug = False
2763 self.LogInfo("Modification will take place without hotplugging.")
2764 else:
2765 self.op.hotplug = True
2768 if self.op.os_name and not self.op.force:
2769 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2770 self.op.force_variant)
2771 instance_os = self.op.os_name
2772 else:
2773 instance_os = instance.os
2775 assert not (self.op.disk_template and self.op.disks), \
2776 "Can't modify disk template and apply disk changes at the same time"
2778 if self.op.disk_template:
2779 self._PreCheckDiskTemplate(pnode_info)
2781 # hvparams processing
2782 if self.op.hvparams:
2783 hv_type = instance.hypervisor
2784 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2785 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2786 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2789 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2790 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2791 self.hv_proposed = self.hv_new = hv_new # the new actual values
2792 self.hv_inst = i_hvdict # the new dict (without defaults)
2793 else:
2794 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2795 instance.hvparams)
2796 self.hv_new = self.hv_inst = {}
2798 # beparams processing
2799 if self.op.beparams:
2800 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2801 use_none=True)
2802 objects.UpgradeBeParams(i_bedict)
2803 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2804 be_new = cluster.SimpleFillBE(i_bedict)
2805 self.be_proposed = self.be_new = be_new # the new actual values
2806 self.be_inst = i_bedict # the new dict (without defaults)
2807 else:
2808 self.be_new = self.be_inst = {}
2809 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2810 be_old = cluster.FillBE(instance)
2812 # CPU param validation -- checking every time a parameter is
2813 # changed to cover all cases where either CPU mask or vcpus have
2815 if (constants.BE_VCPUS in self.be_proposed and
2816 constants.HV_CPU_MASK in self.hv_proposed):
2817 cpu_list = \
2818 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2819 # Verify mask is consistent with number of vCPUs. Can skip this
2820 # test if only 1 entry in the CPU mask, which means same mask
2821 # is applied to all vCPUs.
2822 if (len(cpu_list) > 1 and
2823 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2824 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2826 (self.be_proposed[constants.BE_VCPUS],
2827 self.hv_proposed[constants.HV_CPU_MASK]),
2830 # Only perform this test if a new CPU mask is given
2831 if constants.HV_CPU_MASK in self.hv_new:
2832 # Calculate the largest CPU number requested
2833 max_requested_cpu = max(map(max, cpu_list))
2834 # Check that all of the instance's nodes have enough physical CPUs to
2835 # satisfy the requested CPU mask
2836 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2837 max_requested_cpu + 1, instance.hypervisor)
2839 # osparams processing
2840 if self.op.osparams:
2841 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2842 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2843 self.os_inst = i_osdict # the new dict (without defaults)
2844 else:
2845 self.os_inst = {}
2847 #TODO(dynmem): do the appropriate check involving MINMEM
2848 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2849 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2850 mem_check_list = [pnode]
2851 if be_new[constants.BE_AUTO_BALANCE]:
2852 # either we changed auto_balance to yes or it was from before
2853 mem_check_list.extend(instance.secondary_nodes)
2854 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2855 instance.hypervisor)
2856 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2857 [instance.hypervisor], False)
2858 pninfo = nodeinfo[pnode]
2859 msg = pninfo.fail_msg
2860 if msg:
2861 # Assume the primary node is unreachable and go ahead
2862 self.warn.append("Can't get info from primary node %s: %s" %
2863 (pnode, msg))
2864 else:
2865 (_, _, (pnhvinfo, )) = pninfo.payload
2866 if not isinstance(pnhvinfo.get("memory_free", None), int):
2867 self.warn.append("Node data from primary node %s doesn't contain"
2868 " free memory information" % pnode)
2869 elif instance_info.fail_msg:
2870 self.warn.append("Can't get instance runtime information: %s" %
2871 instance_info.fail_msg)
2873 if instance_info.payload:
2874 current_mem = int(instance_info.payload["memory"])
2875 else:
2876 # Assume instance not running
2877 # (there is a slight race condition here, but it's not very
2878 # probable, and we have no other way to check)
2879 # TODO: Describe race condition
2880 current_mem = 0
2881 #TODO(dynmem): do the appropriate check involving MINMEM
2882 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2883 pnhvinfo["memory_free"])
2884 if miss_mem > 0:
2885 raise errors.OpPrereqError("This change will prevent the instance"
2886 " from starting, due to %d MB of memory"
2887 " missing on its primary node" %
2888 miss_mem, errors.ECODE_NORES)
2890 if be_new[constants.BE_AUTO_BALANCE]:
2891 for node, nres in nodeinfo.items():
2892 if node not in instance.secondary_nodes:
2893 continue
2894 nres.Raise("Can't get info from secondary node %s" % node,
2895 prereq=True, ecode=errors.ECODE_STATE)
2896 (_, _, (nhvinfo, )) = nres.payload
2897 if not isinstance(nhvinfo.get("memory_free", None), int):
2898 raise errors.OpPrereqError("Secondary node %s didn't return free"
2899 " memory information" % node,
2901 #TODO(dynmem): do the appropriate check involving MINMEM
2902 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2903 raise errors.OpPrereqError("This change will prevent the instance"
2904 " from failover to its secondary node"
2905 " %s, due to not enough memory" % node,
2908 if self.op.runtime_mem:
2909 remote_info = self.rpc.call_instance_info(instance.primary_node,
2910 instance.name,
2911 instance.hypervisor)
2912 remote_info.Raise("Error checking node %s" % instance.primary_node)
2913 if not remote_info.payload: # not running already
2914 raise errors.OpPrereqError("Instance %s is not running" %
2915 instance.name, errors.ECODE_STATE)
2917 current_memory = remote_info.payload["memory"]
2918 if (not self.op.force and
2919 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2920 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2921 raise errors.OpPrereqError("Instance %s must have memory between %d"
2922 " and %d MB of memory unless --force is"
2925 self.be_proposed[constants.BE_MINMEM],
2926 self.be_proposed[constants.BE_MAXMEM]),
2929 delta = self.op.runtime_mem - current_memory
2930 if delta > 0:
2931 CheckNodeFreeMemory(self, instance.primary_node,
2932 "ballooning memory for instance %s" %
2933 instance.name, delta, instance.hypervisor)
2935 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2936 raise errors.OpPrereqError("Disk operations not supported for"
2937 " diskless instances", errors.ECODE_INVAL)
2939 def _PrepareNicCreate(_, params, private):
2940 self._PrepareNicModification(params, private, None, None,
2941 {}, cluster, pnode)
2942 return (None, None)
2944 def _PrepareNicMod(_, nic, params, private):
2945 self._PrepareNicModification(params, private, nic.ip, nic.network,
2946 nic.nicparams, cluster, pnode)
2949 def _PrepareNicRemove(_, params, __):
2950 ip = params.ip
2951 net = params.network
2952 if net is not None and ip is not None:
2953 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2955 # Verify NIC changes (operating on copy)
2956 nics = instance.nics[:]
2957 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2958 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2959 if len(nics) > constants.MAX_NICS:
2960 raise errors.OpPrereqError("Instance has too many network interfaces"
2961 " (%d), cannot add more" % constants.MAX_NICS,
2964 def _PrepareDiskMod(_, disk, params, __):
2965 disk.name = params.get(constants.IDISK_NAME, None)
2967 # Verify disk changes (operating on a copy)
2968 disks = copy.deepcopy(instance.disks)
2969 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2970 _PrepareDiskMod, None)
2971 utils.ValidateDeviceNames("disk", disks)
2972 if len(disks) > constants.MAX_DISKS:
2973 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2974 " more" % constants.MAX_DISKS,
2976 disk_sizes = [disk.size for disk in instance.disks]
2977 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2978 self.diskmod if op == constants.DDM_ADD)
2979 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2980 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2982 if self.op.offline is not None and self.op.offline:
2983 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2984 msg="can't change to offline")
2986 # Pre-compute NIC changes (necessary to use result in hooks)
2987 self._nic_chgdesc = []
2988 if self.nicmod:
2989 # Operate on copies as this is still in prereq
2990 nics = [nic.Copy() for nic in instance.nics]
2991 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2992 self._CreateNewNic, self._ApplyNicMods,
2993 self._RemoveNic)
2994 # Verify that NIC names are unique and valid
2995 utils.ValidateDeviceNames("NIC", nics)
2996 self._new_nics = nics
2997 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2998 else:
2999 self._new_nics = None
3000 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
3002 if not self.op.ignore_ipolicy:
3003 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3004 group_info)
3006 # Fill ispec with backend parameters
3007 ispec[constants.ISPEC_SPINDLE_USE] = \
3008 self.be_new.get(constants.BE_SPINDLE_USE, None)
3009 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3010 None)
3012 # Copy ispec to verify parameters with min/max values separately
3013 if self.op.disk_template:
3014 new_disk_template = self.op.disk_template
3015 else:
3016 new_disk_template = instance.disk_template
3017 ispec_max = ispec.copy()
3018 ispec_max[constants.ISPEC_MEM_SIZE] = \
3019 self.be_new.get(constants.BE_MAXMEM, None)
3020 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3021 new_disk_template)
3022 ispec_min = ispec.copy()
3023 ispec_min[constants.ISPEC_MEM_SIZE] = \
3024 self.be_new.get(constants.BE_MINMEM, None)
3025 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3026 new_disk_template)
3028 if (res_max or res_min):
3029 # FIXME: Improve error message by including information about whether
3030 # the upper or lower limit of the parameter fails the ipolicy.
3031 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3032 (group_info, group_info.name,
3033 utils.CommaJoin(set(res_max + res_min))))
3034 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3036 def _ConvertPlainToDrbd(self, feedback_fn):
3037 """Converts an instance from plain to drbd.
3040 feedback_fn("Converting template to drbd")
3041 instance = self.instance
3042 pnode = instance.primary_node
3043 snode = self.op.remote_node
3045 assert instance.disk_template == constants.DT_PLAIN
3047 # create a fake disk info for _GenerateDiskTemplate
3048 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3049 constants.IDISK_VG: d.logical_id[0],
3050 constants.IDISK_NAME: d.name}
3051 for d in instance.disks]
3052 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3053 instance.name, pnode, [snode],
3054 disk_info, None, None, 0, feedback_fn,
3055 self.diskparams)
3056 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3057 self.diskparams)
3058 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3059 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3060 info = GetInstanceInfoText(instance)
3061 feedback_fn("Creating additional volumes...")
3062 # first, create the missing data and meta devices
3063 for disk in anno_disks:
3064 # unfortunately this is... not too nice
3065 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3066 info, True, p_excl_stor)
3067 for child in disk.children:
3068 CreateSingleBlockDev(self, snode, instance, child, info, True,
3069 s_excl_stor)
3070 # at this stage, all new LVs have been created, we can rename the
3071 # old ones
3072 feedback_fn("Renaming original volumes...")
3073 rename_list = [(o, n.children[0].logical_id)
3074 for (o, n) in zip(instance.disks, new_disks)]
3075 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3076 result.Raise("Failed to rename original LVs")
3078 feedback_fn("Initializing DRBD devices...")
3079 # all child devices are in place, we can now create the DRBD devices
3080 try:
3081 for disk in anno_disks:
3082 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3083 f_create = node == pnode
3084 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3085 excl_stor)
3086 except errors.GenericError, e:
3087 feedback_fn("Initializing of DRBD devices failed;"
3088 " renaming back original volumes...")
3089 for disk in new_disks:
3090 self.cfg.SetDiskID(disk, pnode)
3091 rename_back_list = [(n.children[0], o.logical_id)
3092 for (n, o) in zip(new_disks, instance.disks)]
3093 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3094 result.Raise("Failed to rename LVs back after error %s" % str(e))
3097 # at this point, the instance has been modified
3098 instance.disk_template = constants.DT_DRBD8
3099 instance.disks = new_disks
3100 self.cfg.Update(instance, feedback_fn)
3102 # Release node locks while waiting for sync
3103 ReleaseLocks(self, locking.LEVEL_NODE)
3105 # disks are created, waiting for sync
3106 disk_abort = not WaitForSync(self, instance,
3107 oneshot=not self.op.wait_for_sync)
3108 if disk_abort:
3109 raise errors.OpExecError("There are some degraded disks for"
3110 " this instance, please cleanup manually")
3112 # Node resource locks will be released by caller
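# Editor's note: this conversion backs a disk-template change such as (CLI
# shown for illustration)
#   gnt-instance modify -t drbd -n SECONDARY_NODE INSTANCE
# where the secondary is the remote_node validated in _PreCheckDiskTemplate.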
3114 def _ConvertDrbdToPlain(self, feedback_fn):
3115 """Converts an instance from drbd to plain.
3118 instance = self.instance
3120 assert len(instance.secondary_nodes) == 1
3121 assert instance.disk_template == constants.DT_DRBD8
3123 pnode = instance.primary_node
3124 snode = instance.secondary_nodes[0]
3125 feedback_fn("Converting template to plain")
3127 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3128 new_disks = [d.children[0] for d in instance.disks]
3130 # copy over size, mode and name
3131 for parent, child in zip(old_disks, new_disks):
3132 child.size = parent.size
3133 child.mode = parent.mode
3134 child.name = parent.name
3136 # this is a DRBD disk, return its port to the pool
3137 # NOTE: this must be done right before the call to cfg.Update!
3138 for disk in old_disks:
3139 tcp_port = disk.logical_id[2]
3140 self.cfg.AddTcpUdpPort(tcp_port)
3142 # update instance structure
3143 instance.disks = new_disks
3144 instance.disk_template = constants.DT_PLAIN
3145 _UpdateIvNames(0, instance.disks)
3146 self.cfg.Update(instance, feedback_fn)
3148 # Release locks in case removing disks takes a while
3149 ReleaseLocks(self, locking.LEVEL_NODE)
3151 feedback_fn("Removing volumes on the secondary node...")
3152 for disk in old_disks:
3153 self.cfg.SetDiskID(disk, snode)
3154 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3155 if msg:
3156 self.LogWarning("Could not remove block device %s on node %s,"
3157 " continuing anyway: %s", disk.iv_name, snode, msg)
3159 feedback_fn("Removing unneeded volumes on the primary node...")
3160 for idx, disk in enumerate(old_disks):
3161 meta = disk.children[1]
3162 self.cfg.SetDiskID(meta, pnode)
3163 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3164 if msg:
3165 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3166 " continuing anyway: %s", idx, pnode, msg)
3168 def _HotplugDevice(self, action, dev_type, device, extra, seq):
3169 self.LogInfo("Trying to hotplug device...")
3171 result = self.rpc.call_hotplug_device(self.instance.primary_node,
3172 self.instance, action, dev_type,
3173 (device, self.instance),
3176 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3177 self.LogInfo("Continuing execution..")
3180 self.LogInfo("Hotplug done.")
3184 def _CreateNewDisk(self, idx, params, _):
3185 """Creates a new disk.
3188 instance = self.instance
3191 if instance.disk_template in constants.DTS_FILEBASED:
3192 (file_driver, file_path) = instance.disks[0].logical_id
3193 file_path = os.path.dirname(file_path)
3194 else:
3195 file_driver = file_path = None
3197 disk = \
3198 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3199 instance.primary_node, instance.secondary_nodes,
3200 [params], file_path, file_driver, idx,
3201 self.Log, self.diskparams)[0]
3203 new_disks = CreateDisks(self, instance, disks=[disk])
3205 if self.cluster.prealloc_wipe_disks:
3206 # Wipe new disk
3207 WipeOrCleanupDisks(self, instance,
3208 disks=[(idx, disk, 0)],
3209 cleanup=new_disks)
3211 changes = [
3212 ("disk/%d" % idx,
3213 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3214 ]
3215 if self.op.hotplug:
3216 self.cfg.SetDiskID(disk, self.instance.primary_node)
3217 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3218 (disk, self.instance),
3219 self.instance.name, True, idx)
3220 if result.fail_msg:
3221 changes.append(("disk/%d" % idx, "assemble:failed"))
3222 self.LogWarning("Can't assemble newly created disk %d: %s",
3223 idx, result.fail_msg)
3224 else:
3225 _, link_name = result.payload
3226 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3227 constants.HOTPLUG_TARGET_DISK,
3228 disk, link_name, idx)
3229 changes.append(("disk/%d" % idx, msg))
3231 return (disk, changes)
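# Editor's note (illustrative): a successful 1024 MB disk add with hotplug
# enabled returns change descriptions resembling
#   [("disk/0", "add:size=1024,mode=rw"), ("disk/0", "hotplug:done")]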
3233 def _ModifyDisk(self, idx, disk, params, _):
3234 """Modifies a disk.
3236 """
3237 changes = []
3238 if constants.IDISK_MODE in params:
3239 disk.mode = params.get(constants.IDISK_MODE)
3240 changes.append(("disk.mode/%d" % idx, disk.mode))
3242 if constants.IDISK_NAME in params:
3243 disk.name = params.get(constants.IDISK_NAME)
3244 changes.append(("disk.name/%d" % idx, disk.name))
3246 # Modify arbitrary params in case instance template is ext
3247 for key, value in params.iteritems():
3248 if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3249 self.instance.disk_template == constants.DT_EXT):
3250 # stolen from GetUpdatedParams: default means reset/delete
3251 if value.lower() == constants.VALUE_DEFAULT:
3252 try:
3253 del disk.params[key]
3254 except KeyError:
3255 pass
3256 else:
3257 disk.params[key] = value
3258 changes.append(("disk.params:%s/%d" % (key, idx), value))
3260 return changes
3262 def _RemoveDisk(self, idx, root, _):
3263 """Removes a disk.
3265 """
3266 hotmsg = ""
3267 if self.op.hotplug:
3268 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3269 constants.HOTPLUG_TARGET_DISK,
3270 root, None, idx)
3271 ShutdownInstanceDisks(self, self.instance, [root])
3273 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3274 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3275 if self.op.keep_disks and disk.dev_type in constants.DT_EXT:
3276 continue
3277 self.cfg.SetDiskID(disk, node)
3278 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3279 if msg:
3280 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3281 " continuing anyway", idx, node, msg)
3283 # if this is a DRBD disk, return its port to the pool
3284 if root.dev_type in constants.LDS_DRBD:
3285 self.cfg.AddTcpUdpPort(root.logical_id[2])
3287 return hotmsg
3289 def _CreateNewNic(self, idx, params, private):
3290 """Creates data structure for a new network interface.
3293 mac = params[constants.INIC_MAC]
3294 ip = params.get(constants.INIC_IP, None)
3295 net = params.get(constants.INIC_NETWORK, None)
3296 name = params.get(constants.INIC_NAME, None)
3297 net_uuid = self.cfg.LookupNetwork(net)
3298 #TODO: not private.filled?? can a nic have no nicparams??
3299 nicparams = private.filled
3300 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3301 nicparams=nicparams)
3302 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3304 changes = [
3305 ("nic.%d" % idx,
3306 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3307 (mac, ip, private.filled[constants.NIC_MODE],
3308 private.filled[constants.NIC_LINK], net)),
3309 ]
3311 if self.op.hotplug:
3312 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3313 constants.HOTPLUG_TARGET_NIC,
3314 nobj, None, idx)
3315 changes.append(("nic.%d" % idx, msg))
3317 return (nobj, changes)
3319 def _ApplyNicMods(self, idx, nic, params, private):
3320 """Modifies a network interface.
3325 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3327 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3328 setattr(nic, key, params[key])
3330 new_net = params.get(constants.INIC_NETWORK, nic.network)
3331 new_net_uuid = self.cfg.LookupNetwork(new_net)
3332 if new_net_uuid != nic.network:
3333 changes.append(("nic.network/%d" % idx, new_net))
3334 nic.network = new_net_uuid
3336 if private.filled:
3337 nic.nicparams = private.filled
3339 for (key, val) in nic.nicparams.items():
3340 changes.append(("nic.%s/%d" % (key, idx), val))
3342 if self.op.hotplug:
3343 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3344 constants.HOTPLUG_TARGET_NIC,
3345 nic, None, idx)
3346 changes.append(("nic/%d" % idx, msg))
3348 return changes
3350 def _RemoveNic(self, idx, nic, _):
3351 if self.op.hotplug:
3352 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3353 constants.HOTPLUG_TARGET_NIC,
3354 nic, None, idx)
3356 def Exec(self, feedback_fn):
3357 """Modifies an instance.
3359 All parameters take effect only at the next restart of the instance.
3361 """
3362 # Process here the warnings from CheckPrereq, as we don't have a
3363 # feedback_fn there.
3364 # TODO: Replace with self.LogWarning
3365 for warn in self.warn:
3366 feedback_fn("WARNING: %s" % warn)
3368 assert ((self.op.disk_template is None) ^
3369 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3370 "Not owning any node resource locks"
3372 result = []
3373 instance = self.instance
3376 if self.op.pnode:
3377 instance.primary_node = self.op.pnode
3380 if self.op.runtime_mem:
3381 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3382 instance,
3383 self.op.runtime_mem)
3384 rpcres.Raise("Cannot modify instance runtime memory")
3385 result.append(("runtime_memory", self.op.runtime_mem))
3387 # Apply disk changes
3388 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3389 self._CreateNewDisk, self._ModifyDisk,
3390 self._RemoveDisk)
3391 _UpdateIvNames(0, instance.disks)
3393 if self.op.disk_template:
3394 if __debug__:
3395 check_nodes = set(instance.all_nodes)
3396 if self.op.remote_node:
3397 check_nodes.add(self.op.remote_node)
3398 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3399 owned = self.owned_locks(level)
3400 assert not (check_nodes - owned), \
3401 ("Not owning the correct locks, owning %r, expected at least %r" %
3402 (owned, check_nodes))
3404 r_shut = ShutdownInstanceDisks(self, instance)
3405 if not r_shut:
3406 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3407 " proceed with disk template conversion")
3408 mode = (instance.disk_template, self.op.disk_template)
3409 try:
3410 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3411 except:
3412 self.cfg.ReleaseDRBDMinors(instance.name)
3413 raise
3414 result.append(("disk_template", self.op.disk_template))
3416 assert instance.disk_template == self.op.disk_template, \
3417 ("Expected disk template '%s', found '%s'" %
3418 (self.op.disk_template, instance.disk_template))
3420 # Release node and resource locks if there are any (they might already have
3421 # been released during disk conversion)
3422 ReleaseLocks(self, locking.LEVEL_NODE)
3423 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3426 if self._new_nics is not None:
3427 instance.nics = self._new_nics
3428 result.extend(self._nic_chgdesc)
3431 if self.op.hvparams:
3432 instance.hvparams = self.hv_inst
3433 for key, val in self.op.hvparams.iteritems():
3434 result.append(("hv/%s" % key, val))
3437 if self.op.beparams:
3438 instance.beparams = self.be_inst
3439 for key, val in self.op.beparams.iteritems():
3440 result.append(("be/%s" % key, val))
3442 # OS change
3443 if self.op.os_name:
3444 instance.os = self.op.os_name
3447 if self.op.osparams:
3448 instance.osparams = self.os_inst
3449 for key, val in self.op.osparams.iteritems():
3450 result.append(("os/%s" % key, val))
3452 if self.op.offline is None:
3453 # Ignore
3454 pass
3455 elif self.op.offline:
3456 # Mark instance as offline
3457 self.cfg.MarkInstanceOffline(instance.name)
3458 result.append(("admin_state", constants.ADMINST_OFFLINE))
3460 # Mark instance as online, but stopped
3461 self.cfg.MarkInstanceDown(instance.name)
3462 result.append(("admin_state", constants.ADMINST_DOWN))
3464 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3466 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3467 self.owned_locks(locking.LEVEL_NODE)), \
3468 "All node locks should have been released by now"
3472 _DISK_CONVERSIONS = {
3473 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3474 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3475 }
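# Editor's note (illustrative): Exec dispatches through this mapping via
#   mode = (instance.disk_template, self.op.disk_template)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)
# so only the two plain<->drbd transitions are reachable here;
# _PreCheckDiskTemplate rejects every other combination beforehand.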
3478 class LUInstanceChangeGroup(LogicalUnit):
3479 HPATH = "instance-change-group"
3480 HTYPE = constants.HTYPE_INSTANCE
3481 REQ_BGL = False
3483 def ExpandNames(self):
3484 self.share_locks = ShareAll()
3486 self.needed_locks = {
3487 locking.LEVEL_NODEGROUP: [],
3488 locking.LEVEL_NODE: [],
3489 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3490 }
3492 self._ExpandAndLockInstance()
3494 if self.op.target_groups:
3495 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3496 self.op.target_groups)
3497 else:
3498 self.req_target_uuids = None
3500 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3502 def DeclareLocks(self, level):
3503 if level == locking.LEVEL_NODEGROUP:
3504 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3506 if self.req_target_uuids:
3507 lock_groups = set(self.req_target_uuids)
3509 # Lock all groups used by instance optimistically; this requires going
3510 # via the node before it's locked, requiring verification later on
3511 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3512 lock_groups.update(instance_groups)
3514 # No target groups, need to lock all of them
3515 lock_groups = locking.ALL_SET
3517 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3519 elif level == locking.LEVEL_NODE:
3520 if self.req_target_uuids:
3521 # Lock all nodes used by instances
3522 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3523 self._LockInstancesNodes()
3525 # Lock all nodes in all potential target groups
3526 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3527 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3528 member_nodes = [node_name
3529 for group in lock_groups
3530 for node_name in self.cfg.GetNodeGroup(group).members]
3531 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3532 else:
3533 # Lock all nodes as all groups are potential targets
3534 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3536 def CheckPrereq(self):
3537 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3538 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3539 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3541 assert (self.req_target_uuids is None or
3542 owned_groups.issuperset(self.req_target_uuids))
3543 assert owned_instances == set([self.op.instance_name])
3545 # Get instance information
3546 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3548 # Check if node groups for locked instance are still correct
3549 assert owned_nodes.issuperset(self.instance.all_nodes), \
3550 ("Instance %s's nodes changed while we kept the lock" %
3551 self.op.instance_name)
3553 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3554 owned_groups)
3556 if self.req_target_uuids:
3557 # User requested specific target groups
3558 self.target_uuids = frozenset(self.req_target_uuids)
3560 # All groups except those used by the instance are potential targets
3561 self.target_uuids = owned_groups - inst_groups
3563 conflicting_groups = self.target_uuids & inst_groups
3564 if conflicting_groups:
3565 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3566 " used by the instance '%s'" %
3567 (utils.CommaJoin(conflicting_groups),
3568 self.op.instance_name),
3569 errors.ECODE_INVAL)
3571 if not self.target_uuids:
3572 raise errors.OpPrereqError("There are no possible target groups",
3573 errors.ECODE_INVAL)
3575 def BuildHooksEnv(self):
3576 """Build hooks env.
3578 """
3579 assert self.target_uuids
3580 env = {
3582 "TARGET_GROUPS": " ".join(self.target_uuids),
3583 }
3585 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3587 return env
3589 def BuildHooksNodes(self):
3590 """Build hooks nodes.
3593 mn = self.cfg.GetMasterNode()
3596 def Exec(self, feedback_fn):
3597 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3599 assert instances == [self.op.instance_name], "Instance not locked"
3601 req = iallocator.IAReqGroupChange(instances=instances,
3602 target_groups=list(self.target_uuids))
3603 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3605 ial.Run(self.op.iallocator)
3607 if not ial.success:
3608 raise errors.OpPrereqError("Can't compute solution for changing group of"
3609 " instance '%s' using iallocator '%s': %s" %
3610 (self.op.instance_name, self.op.iallocator,
3611 ial.info), errors.ECODE_NORES)
3613 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3615 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3616 " instance '%s'", len(jobs), self.op.instance_name)
3618 return ResultWithJobs(jobs)