# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Logical units dealing with instances."""
import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance
#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callback function
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
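
# Illustrative value matching the type above (sketch only, not used by
# the code): a list of (description, value) pairs such as
#   [("nic/0", "routed")]
# where the description is a non-empty string and the value is arbitrary.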


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
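
# Illustrative behaviour (sketch; names are hypothetical): a request for
# "inst1" that resolves to "inst1.example.com" passes, since the given
# name is a prefix component of the resolved one, and the resolved
# object is returned:
#   hostname = _CheckHostnameSane(lu, "inst1")
#   hostname.name  # "inst1.example.com"
# A resolution to "other.example.com" would raise OpPrereqError instead.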


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)
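
# Illustrative request (sketch; values are hypothetical): with beparams
# fully filled by _ComputeFullBeParams below, e.g.
#   {"vcpus": 2, "maxmem": 1024, "spindle_use": 1, ...}
# the allocator request carries vcpus=2, memory=1024 and spindle_use=1
# together with the computed disks and nics.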


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
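
# Worked example (values hypothetical): with op.beparams of
#   {"vcpus": "auto", "maxmem": 512}
# and a cluster default of vcpus=2, the "auto" value is replaced by 2,
# the dict is upgraded and type-checked, and SimpleFillBE merges in the
# remaining cluster defaults to produce the fully filled result.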


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The build up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
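
# Illustrative input/output (sketch; values are hypothetical): an opcode
# nic of {"mode": "bridged", "link": "br0", "mac": "auto"} yields an
# objects.NIC whose nicparams are {"mode": "bridged", "link": "br0"},
# with ip=None and the MAC left as "auto", to be generated later during
# CheckPrereq.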


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
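
# Illustrative call (sketch; keys abbreviated, numbers hypothetical):
#   ispec = {ISPEC_MEM_SIZE: 512, ISPEC_CPU_COUNT: 1, ISPEC_DISK_COUNT: 1,
#            ISPEC_DISK_SIZE: [1024], ISPEC_NIC_COUNT: 1,
#            ISPEC_SPINDLE_USE: 1}
# is unpacked and delegated to ComputeIPolicySpecViolation, which returns
# a list of violation messages (empty when the spec fits the policy).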


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
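
# Example of the naming scheme being validated (sketch): for an OS with
# supported_variants ["default", "testing"], "debootstrap+default" is
# accepted, "debootstrap" alone is rejected because a variant is
# required, and "debootstrap+foo" is rejected as unsupported.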


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      self.cfg.GetNodeNames(node_whitelist))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
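
  # Illustrative result of _CalculateFileStorageDir (sketch; paths depend
  # on the cluster configuration): with a cluster file storage dir of
  # "/srv/ganeti/file-storage", an opcode file_storage_dir of "mysubdir"
  # and instance name "inst1", the final directory is
  # "/srv/ganeti/file-storage/mysubdir/inst1".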

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_NORES)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 self.op.instance_name, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1721 def Exec(self, feedback_fn):
1722 """Move an instance.
1724 The move is done by shutting it down on its present node, copying
1725 the data over (slow) and starting it on the new node.
1728 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1729 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1731 self.LogInfo("Shutting down instance %s on source node %s",
1732 self.instance.name, source_node.name)
1734 assert (self.owned_locks(locking.LEVEL_NODE) ==
1735 self.owned_locks(locking.LEVEL_NODE_RES))
1737 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1738 self.op.shutdown_timeout,
1740 if self.op.ignore_consistency:
1741 result.Warn("Could not shut down instance %s on node %s. Proceeding"
1742 " anyway. Please make sure node %s is down. Error details" %
1743 (self.instance.name, source_node.name, source_node.name),
1746 result.Raise("Could not shut down instance %s on node %s" %
1747 (self.instance.name, source_node.name))
1749 # create the target disks
1751 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1752 except errors.OpExecError:
1753 self.LogWarning("Device creation failed")
1754 self.cfg.ReleaseDRBDMinors(self.instance.name)
1757 cluster_name = self.cfg.GetClusterInfo().cluster_name
1760 # activate, get path, copy the data over
1761 for idx, disk in enumerate(self.instance.disks):
1762 self.LogInfo("Copying data for disk %d", idx)
1763 result = self.rpc.call_blockdev_assemble(
1764 target_node.uuid, (disk, self.instance), self.instance.name,
1767 self.LogWarning("Can't assemble newly created disk %d: %s",
1768 idx, result.fail_msg)
1769 errs.append(result.fail_msg)
1771 dev_path = result.payload
1772 result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1774 target_node.name, dev_path,
1777 self.LogWarning("Can't copy data over for disk %d: %s",
1778 idx, result.fail_msg)
1779 errs.append(result.fail_msg)
1783 self.LogWarning("Some disks failed to copy, aborting")
1785 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1787 self.cfg.ReleaseDRBDMinors(self.instance.name)
1788 raise errors.OpExecError("Errors during disk copy: %s" %
1791 self.instance.primary_node = target_node.uuid
1792 self.cfg.Update(self.instance, feedback_fn)
1794 self.LogInfo("Removing the disks on the original node")
1795 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1797 # Only start the instance if it's marked as up
1798 if self.instance.admin_state == constants.ADMINST_UP:
1799 self.LogInfo("Starting instance %s on node %s",
1800 self.instance.name, target_node.name)
1802 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1803 ignore_secondaries=True)
1805 ShutdownInstanceDisks(self, self.instance)
1806 raise errors.OpExecError("Can't activate the instance's disks")
1808 result = self.rpc.call_instance_start(target_node.uuid,
1809 (self.instance, None, None), False,
1811 msg = result.fail_msg
1813 ShutdownInstanceDisks(self, self.instance)
1814 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1815 (self.instance.name, target_node.name, msg))
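# Editor's note: a minimal, self-contained sketch (not part of Ganeti) of the
# move sequence implemented by Exec above. Every callable here is a
# hypothetical stand-in for the corresponding RPC; the rollback on copy
# errors mirrors the RemoveDisks call above.
def _SketchMoveInstance(shutdown_fn, create_disks_fn, copy_disk_fn,
                        remove_disks_fn, start_fn, disks,
                        source, target, start_after_move):
  shutdown_fn(source)                  # 1. stop the instance on the source
  create_disks_fn(target)              # 2. allocate the target volumes
  errs = []
  for disk in disks:                   # 3. copy each disk (the slow part)
    err = copy_disk_fn(disk, source, target)
    if err:
      errs.append(err)
  if errs:
    remove_disks_fn(target)            # roll back the partial copies
    raise RuntimeError("Errors during disk copy: %s" % ", ".join(errs))
  # at this point the real code flips the instance's primary_node to the
  # target and writes the configuration before touching the old volumes
  remove_disks_fn(source)              # 4. drop the now-unused old volumes
  if start_after_move:
    start_fn(target)                   # 5. restart only if marked as up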
1818 class LUInstanceMultiAlloc(NoHooksLU):
1819 """Allocates multiple instances at the same time.
1824 def CheckArguments(self):
1829 for inst in self.op.instances:
1830 if inst.iallocator is not None:
1831 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1832 " instance objects", errors.ECODE_INVAL)
1833 nodes.append(bool(inst.pnode))
1834 if inst.disk_template in constants.DTS_INT_MIRROR:
1835 nodes.append(bool(inst.snode))
1837 has_nodes = compat.any(nodes)
1838 if compat.all(nodes) ^ has_nodes:
1839 raise errors.OpPrereqError("There are instance objects providing"
1840 " pnode/snode while others do not",
1843 if self.op.iallocator is None:
1844 default_iallocator = self.cfg.GetDefaultIAllocator()
1845 if default_iallocator and has_nodes:
1846 self.op.iallocator = default_iallocator
1848 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1849 " given and no cluster-wide default"
1850 " iallocator found; please specify either"
1851 " an iallocator or nodes on the instances"
1852 " or set a cluster-wide default iallocator",
1855 _CheckOpportunisticLocking(self.op)
1857 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1859 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1860 utils.CommaJoin(dups), errors.ECODE_INVAL)
1862 def ExpandNames(self):
1863 """Calculate the locks.
1866 self.share_locks = ShareAll()
1867 self.needed_locks = {
1868 # iallocator will select nodes and even if no iallocator is used,
1869 # collisions with LUInstanceCreate should be avoided
1870 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1873 if self.op.iallocator:
1874 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1875 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1877 if self.op.opportunistic_locking:
1878 self.opportunistic_locks[locking.LEVEL_NODE] = True
1879 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1882 for inst in self.op.instances:
1883 (inst.pnode_uuid, inst.pnode) = \
1884 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1885 nodeslist.append(inst.pnode)
1886 if inst.snode is not None:
1887 (inst.snode_uuid, inst.snode) = \
1888 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1889 nodeslist.append(inst.snode)
1891 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1892 # Lock resources of instance's primary and secondary nodes (copy to
1893 # prevent accidental modification)
1894 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1896 def CheckPrereq(self):
1897 """Check prerequisite.
1900 cluster = self.cfg.GetClusterInfo()
1901 default_vg = self.cfg.GetVGName()
1902 ec_id = self.proc.GetECId()
1904 if self.op.opportunistic_locking:
1905 # Only consider nodes for which a lock is held
1906 node_whitelist = self.cfg.GetNodeNames(
1907 list(self.owned_locks(locking.LEVEL_NODE)))
1909 node_whitelist = None
1911 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1912 _ComputeNics(op, cluster, None,
1914 _ComputeFullBeParams(op, cluster),
1916 for op in self.op.instances]
1918 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1919 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1921 ial.Run(self.op.iallocator)
1924 raise errors.OpPrereqError("Can't compute nodes using"
1925 " iallocator '%s': %s" %
1926 (self.op.iallocator, ial.info),
1929 self.ia_result = ial.result
1932 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1933 constants.JOB_IDS_KEY: [],
1936 def _ConstructPartialResult(self):
1937 """Contructs the partial result.
1940 (allocatable, failed) = self.ia_result
1942 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1943 map(compat.fst, allocatable),
1944 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1947 def Exec(self, feedback_fn):
1948 """Executes the opcode.
1951 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1952 (allocatable, failed) = self.ia_result
1955 for (name, node_names) in allocatable:
1956 op = op2inst.pop(name)
1958 (op.pnode_uuid, op.pnode) = \
1959 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1960 if len(node_names) > 1:
1961 (op.snode_uuid, op.snode) = \
1962 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1966 missing = set(op2inst.keys()) - set(failed)
1967 assert not missing, \
1968 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1970 return ResultWithJobs(jobs, **self._ConstructPartialResult())
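# Editor's note: a hypothetical, self-contained sketch of how Exec above
# consumes the multi-allocation answer. The iallocator reports the names it
# could place (with their chosen nodes) and the names it could not; every
# requested name must land in exactly one of the two sets, which is what the
# "missing" assertion verifies.
def _SketchSplitMultiAlloc(requested_names, allocatable, failed):
  remaining = set(requested_names)
  placements = []
  for (name, node_names) in allocatable:
    remaining.remove(name)
    if len(node_names) > 1:
      snode = node_names[1]            # mirrored templates get a secondary
    else:
      snode = None
    placements.append((name, node_names[0], snode))
  missing = remaining - set(failed)
  assert not missing, "Incomplete result: %s" % utils.CommaJoin(missing)
  return (placements, failed)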
1973 class _InstNicModPrivate:
1974 """Data structure for network interface modifications.
1976 Used by L{LUInstanceSetParams}.
1984 def _PrepareContainerMods(mods, private_fn):
1985 """Prepares a list of container modifications by adding a private data field.
1987 @type mods: list of tuples; (operation, index, parameters)
1988 @param mods: List of modifications
1989 @type private_fn: callable or None
1990 @param private_fn: Callable for constructing a private data field for a
1995 if private_fn is None:
2000 return [(op, idx, params, fn()) for (op, idx, params) in mods]
2003 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2004 """Checks if nodes have enough physical CPUs
2006 This function checks if all given nodes have the needed number of
2007 physical CPUs. In case any node has fewer CPUs or we cannot get the
2008 information from the node, this function raises an OpPrereqError
2011 @type lu: C{LogicalUnit}
2012 @param lu: a logical unit from which we get configuration data
2013 @type node_uuids: C{list}
2014 @param node_uuids: the list of node UUIDs to check
2015 @type requested: C{int}
2016 @param requested: the minimum acceptable number of physical CPUs
2017 @type hypervisor_specs: list of pairs (string, dict of strings)
2018 @param hypervisor_specs: list of hypervisor specifications in
2019 pairs (hypervisor_name, hvparams)
2020 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2021 or we cannot check the node
2024 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
2025 for node_uuid in node_uuids:
2026 info = nodeinfo[node_uuid]
2027 node_name = lu.cfg.GetNodeName(node_uuid)
2028 info.Raise("Cannot get current information from node %s" % node_name,
2029 prereq=True, ecode=errors.ECODE_ENVIRON)
2030 (_, _, (hv_info, )) = info.payload
2031 num_cpus = hv_info.get("cpu_total", None)
2032 if not isinstance(num_cpus, int):
2033 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2034 " on node %s, result was '%s'" %
2035 (node_name, num_cpus), errors.ECODE_ENVIRON)
2036 if requested > num_cpus:
2037 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2038 "required" % (node_name, num_cpus, requested),
2042 def GetItemFromContainer(identifier, kind, container):
2043 """Return the item refered by the identifier.
2045 @type identifier: string
2046 @param identifier: Item index or name or UUID
2048 @param kind: One-word item description
2049 @type container: list
2050 @param container: Container to get the item from
2055 idx = int(identifier)
2058 absidx = len(container) - 1
2060 raise IndexError("Not accepting negative indices other than -1")
2061 elif idx > len(container):
2062 raise IndexError("Got %s index %s, but there are only %s" %
2063 (kind, idx, len(container)))
2066 return (absidx, container[idx])
2070 for idx, item in enumerate(container):
2071 if item.uuid == identifier or item.name == identifier:
2074 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2075 (kind, identifier), errors.ECODE_NOENT)
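# Editor's note: the lookup rules above, demonstrated on plain objects; the
# _DemoItem class is purely illustrative. An identifier is first tried as an
# integer index (-1 meaning the last item); failing that, it is matched
# against each item's uuid and then its name.
class _DemoItem(object):
  def __init__(self, uuid, name):
    self.uuid = uuid
    self.name = name

def _SketchItemLookup():
  container = [_DemoItem("u0", "boot"), _DemoItem("u1", "data")]
  assert GetItemFromContainer("1", "disk", container)[0] == 1     # index
  assert GetItemFromContainer("-1", "disk", container)[0] == 1    # last item
  assert GetItemFromContainer("u0", "disk", container)[0] == 0    # by UUID
  assert GetItemFromContainer("boot", "disk", container)[0] == 0  # by name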
2078 def _ApplyContainerMods(kind, container, chgdesc, mods,
2079 create_fn, modify_fn, remove_fn):
2080 """Applies descriptions in C{mods} to C{container}.
2083 @param kind: One-word item description
2084 @type container: list
2085 @param container: Container to modify
2086 @type chgdesc: None or list
2087 @param chgdesc: List of applied changes
2089 @param mods: Modifications as returned by L{_PrepareContainerMods}
2090 @type create_fn: callable
2091 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2092 receives absolute item index, parameters and private data object as added
2093 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2095 @type modify_fn: callable
2096 @param modify_fn: Callback for modifying an existing item
2097 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2098 and private data object as added by L{_PrepareContainerMods}, returns
2100 @type remove_fn: callable
2101 @param remove_fn: Callback on removing item; receives absolute item index,
2102 item and private data object as added by L{_PrepareContainerMods}
2105 for (op, identifier, params, private) in mods:
2108 if op == constants.DDM_ADD:
2109 # Calculate where item will be added
2110 # When adding an item, identifier can only be an index
2112 idx = int(identifier)
2114 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2115 " identifier for %s" % constants.DDM_ADD,
2118 addidx = len(container)
2121 raise IndexError("Not accepting negative indices other than -1")
2122 elif idx > len(container):
2123 raise IndexError("Got %s index %s, but there are only %s" %
2124 (kind, idx, len(container)))
2127 if create_fn is None:
2130 (item, changes) = create_fn(addidx, params, private)
2133 container.append(item)
2136 assert idx <= len(container)
2137 # list.insert does so before the specified index
2138 container.insert(idx, item)
2140 # Retrieve existing item
2141 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2143 if op == constants.DDM_REMOVE:
2146 if remove_fn is not None:
2147 remove_fn(absidx, item, private)
2149 changes = [("%s/%s" % (kind, absidx), "remove")]
2151 assert container[absidx] == item
2152 del container[absidx]
2153 elif op == constants.DDM_MODIFY:
2154 if modify_fn is not None:
2155 changes = modify_fn(absidx, item, params, private)
2157 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2159 assert _TApplyContModsCbChanges(changes)
2161 if not (chgdesc is None or changes is None):
2162 chgdesc.extend(changes)
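# Editor's note: a hypothetical end-to-end use of the two helpers above,
# operating on a list of plain dicts. The callbacks mirror the signatures of
# the real disk/NIC callbacks (absolute index, item, params, private data)
# and return the change pairs that end up in chgdesc.
def _SketchApplyMods():
  container = [{"name": "eth0"}]
  chgdesc = []
  mods = _PrepareContainerMods([(constants.DDM_ADD, -1, {"name": "eth1"}),
                                (constants.DDM_MODIFY, 0, {"name": "lan"})],
                               None)

  def _Create(idx, params, _):
    # the real callbacks also report an "add:..." change entry here
    return (dict(params), [])

  def _Modify(idx, item, params, _):
    item.update(params)
    return [("demo/%d" % idx, "name=%s" % params["name"])]

  _ApplyContainerMods("demo", container, chgdesc, mods, _Create, _Modify,
                      None)
  # container is now [{"name": "lan"}, {"name": "eth1"}] and chgdesc holds
  # the modification descriptions returned by _Modify
  return (container, chgdesc)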
2165 def _UpdateIvNames(base_index, disks):
2166 """Updates the C{iv_name} attribute of disks.
2168 @type disks: list of L{objects.Disk}
2171 for (idx, disk) in enumerate(disks):
2172 disk.iv_name = "disk/%s" % (base_index + idx, )
2175 class LUInstanceSetParams(LogicalUnit):
2176 """Modifies an instances's parameters.
2179 HPATH = "instance-modify"
2180 HTYPE = constants.HTYPE_INSTANCE
2184 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2185 assert ht.TList(mods)
2186 assert not mods or len(mods[0]) in (2, 3)
2188 if mods and len(mods[0]) == 2:
2192 for op, params in mods:
2193 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2194 result.append((op, -1, params))
2198 raise errors.OpPrereqError("Only one %s add or remove operation is"
2199 " supported at a time" % kind,
2202 result.append((constants.DDM_MODIFY, op, params))
2204 assert verify_fn(result)
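# Editor's note: a worked example (hypothetical values) of the upgrade above.
# Legacy opcodes carried (operation, params) 2-tuples in which the operation
# slot held either add/remove or a bare index meaning "modify":
#
#   [(constants.DDM_ADD, {...})]  becomes  [(constants.DDM_ADD, -1, {...})]
#   [(2, {...})]                  becomes  [(constants.DDM_MODIFY, 2, {...})]
#
# Because the legacy form cannot express where a second add or remove should
# apply, lists containing more than one of them are rejected above.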
2211 def _CheckMods(kind, mods, key_types, item_fn):
2212 """Ensures requested disk/NIC modifications are valid.
2215 for (op, _, params) in mods:
2216 assert ht.TDict(params)
2218 # If 'key_types' is an empty dict, we assume we have an
2219 # 'ext' template and thus do not ForceDictType
2221 utils.ForceDictType(params, key_types)
2223 if op == constants.DDM_REMOVE:
2225 raise errors.OpPrereqError("No settings should be passed when"
2226 " removing a %s" % kind,
2228 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2231 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2234 def _VerifyDiskModification(op, params, excl_stor):
2235 """Verifies a disk modification.
2238 if op == constants.DDM_ADD:
2239 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2240 if mode not in constants.DISK_ACCESS_SET:
2241 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2244 size = params.get(constants.IDISK_SIZE, None)
2246 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2247 constants.IDISK_SIZE, errors.ECODE_INVAL)
2251 except (TypeError, ValueError), err:
2252 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2255 params[constants.IDISK_SIZE] = size
2256 name = params.get(constants.IDISK_NAME, None)
2257 if name is not None and name.lower() == constants.VALUE_NONE:
2258 params[constants.IDISK_NAME] = None
2260 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2262 elif op == constants.DDM_MODIFY:
2263 if constants.IDISK_SIZE in params:
2264 raise errors.OpPrereqError("Disk size change not possible, use"
2265 " grow-disk", errors.ECODE_INVAL)
2267 raise errors.OpPrereqError("Disk modification doesn't support"
2268 " additional arbitrary parameters",
2270 name = params.get(constants.IDISK_NAME, None)
2271 if name is not None and name.lower() == constants.VALUE_NONE:
2272 params[constants.IDISK_NAME] = None
2275 def _VerifyNicModification(op, params):
2276 """Verifies a network interface modification.
2279 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2280 ip = params.get(constants.INIC_IP, None)
2281 name = params.get(constants.INIC_NAME, None)
2282 req_net = params.get(constants.INIC_NETWORK, None)
2283 link = params.get(constants.NIC_LINK, None)
2284 mode = params.get(constants.NIC_MODE, None)
2285 if name is not None and name.lower() == constants.VALUE_NONE:
2286 params[constants.INIC_NAME] = None
2287 if req_net is not None:
2288 if req_net.lower() == constants.VALUE_NONE:
2289 params[constants.INIC_NETWORK] = None
2291 elif link is not None or mode is not None:
2292 raise errors.OpPrereqError("If network is given"
2293 " mode or link should not",
2296 if op == constants.DDM_ADD:
2297 macaddr = params.get(constants.INIC_MAC, None)
2299 params[constants.INIC_MAC] = constants.VALUE_AUTO
2302 if ip.lower() == constants.VALUE_NONE:
2303 params[constants.INIC_IP] = None
2305 if ip.lower() == constants.NIC_IP_POOL:
2306 if op == constants.DDM_ADD and req_net is None:
2307 raise errors.OpPrereqError("If ip=pool, parameter network"
2311 if not netutils.IPAddress.IsValid(ip):
2312 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2315 if constants.INIC_MAC in params:
2316 macaddr = params[constants.INIC_MAC]
2317 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2318 macaddr = utils.NormalizeAndValidateMac(macaddr)
2320 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2321 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2322 " modifying an existing NIC",
2325 def CheckArguments(self):
2326 if not (self.op.nics or self.op.disks or self.op.disk_template or
2327 self.op.hvparams or self.op.beparams or self.op.os_name or
2328 self.op.offline is not None or self.op.runtime_mem or
2330 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2332 if self.op.hvparams:
2333 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2334 "hypervisor", "instance", "cluster")
2336 self.op.disks = self._UpgradeDiskNicMods(
2337 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2338 self.op.nics = self._UpgradeDiskNicMods(
2339 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2341 if self.op.disks and self.op.disk_template is not None:
2342 raise errors.OpPrereqError("Disk template conversion and other disk"
2343 " changes not supported at the same time",
2346 if (self.op.disk_template and
2347 self.op.disk_template in constants.DTS_INT_MIRROR and
2348 self.op.remote_node is None):
2349 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2350 " one requires specifying a secondary node",
2353 # Check NIC modifications
2354 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2355 self._VerifyNicModification)
2358 (self.op.pnode_uuid, self.op.pnode) = \
2359 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2361 def ExpandNames(self):
2362 self._ExpandAndLockInstance()
2363 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2364 # Can't even acquire node locks in shared mode as upcoming changes in
2365 # Ganeti 2.6 will start to modify the node object on disk conversion
2366 self.needed_locks[locking.LEVEL_NODE] = []
2367 self.needed_locks[locking.LEVEL_NODE_RES] = []
2368 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2369 # Lock the node group (shared) so we can look up the ipolicy
2370 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2372 def DeclareLocks(self, level):
2373 if level == locking.LEVEL_NODEGROUP:
2374 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2375 # Acquire locks for the instance's nodegroups optimistically. Needs
2376 # to be verified in CheckPrereq
2377 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2378 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2379 elif level == locking.LEVEL_NODE:
2380 self._LockInstancesNodes()
2381 if self.op.disk_template and self.op.remote_node:
2382 (self.op.remote_node_uuid, self.op.remote_node) = \
2383 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2384 self.op.remote_node)
2385 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2386 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2388 self.needed_locks[locking.LEVEL_NODE_RES] = \
2389 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2391 def BuildHooksEnv(self):
2394 This runs on the master, primary and secondaries.
2398 if constants.BE_MINMEM in self.be_new:
2399 args["minmem"] = self.be_new[constants.BE_MINMEM]
2400 if constants.BE_MAXMEM in self.be_new:
2401 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2402 if constants.BE_VCPUS in self.be_new:
2403 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2404 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2405 # information at all.
2407 if self._new_nics is not None:
2410 for nic in self._new_nics:
2411 n = copy.deepcopy(nic)
2412 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2413 n.nicparams = nicparams
2414 nics.append(NICToTuple(self, n))
2418 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2419 if self.op.disk_template:
2420 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2421 if self.op.runtime_mem:
2422 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2426 def BuildHooksNodes(self):
2427 """Build hooks nodes.
2430 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2433 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2434 old_params, cluster, pnode_uuid):
2436 update_params_dict = dict([(key, params[key])
2437 for key in constants.NICS_PARAMETERS
2440 req_link = update_params_dict.get(constants.NIC_LINK, None)
2441 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2444 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2445 if new_net_uuid_or_name:
2446 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2447 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2450 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2453 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2455 raise errors.OpPrereqError("No netparams found for the network"
2456 " %s, probably not connected" %
2457 new_net_obj.name, errors.ECODE_INVAL)
2458 new_params = dict(netparams)
2460 new_params = GetUpdatedParams(old_params, update_params_dict)
2462 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2464 new_filled_params = cluster.SimpleFillNIC(new_params)
2465 objects.NIC.CheckParameterSyntax(new_filled_params)
2467 new_mode = new_filled_params[constants.NIC_MODE]
2468 if new_mode == constants.NIC_MODE_BRIDGED:
2469 bridge = new_filled_params[constants.NIC_LINK]
2470 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2472 msg = "Error checking bridges on node '%s': %s" % \
2473 (self.cfg.GetNodeName(pnode_uuid), msg)
2475 self.warn.append(msg)
2477 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2479 elif new_mode == constants.NIC_MODE_ROUTED:
2480 ip = params.get(constants.INIC_IP, old_ip)
2482 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2483 " on a routed NIC", errors.ECODE_INVAL)
2485 elif new_mode == constants.NIC_MODE_OVS:
2486 # TODO: check OVS link
2487 self.LogInfo("OVS links are currently not checked for correctness")
2489 if constants.INIC_MAC in params:
2490 mac = params[constants.INIC_MAC]
2492 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2494 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2495 # otherwise generate the MAC address
2496 params[constants.INIC_MAC] = \
2497 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2499 # or validate/reserve the current one
2501 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2502 except errors.ReservationError:
2503 raise errors.OpPrereqError("MAC address '%s' already in use"
2504 " in cluster" % mac,
2505 errors.ECODE_NOTUNIQUE)
2506 elif new_net_uuid != old_net_uuid:
2508 def get_net_prefix(net_uuid):
2511 nobj = self.cfg.GetNetwork(net_uuid)
2512 mac_prefix = nobj.mac_prefix
2516 new_prefix = get_net_prefix(new_net_uuid)
2517 old_prefix = get_net_prefix(old_net_uuid)
2518 if old_prefix != new_prefix:
2519 params[constants.INIC_MAC] = \
2520 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2522 # if there is a change in (ip, network) tuple
2523 new_ip = params.get(constants.INIC_IP, old_ip)
2524 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2526 # if IP is pool then require a network and generate one IP
2527 if new_ip.lower() == constants.NIC_IP_POOL:
2530 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2531 except errors.ReservationError:
2532 raise errors.OpPrereqError("Unable to get a free IP"
2533 " from the address pool",
2535 self.LogInfo("Chose IP %s from network %s",
2538 params[constants.INIC_IP] = new_ip
2540 raise errors.OpPrereqError("ip=pool, but no network found",
2542 # Reserve the new IP in the new network, if any
2545 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2546 self.LogInfo("Reserving IP %s in network %s",
2547 new_ip, new_net_obj.name)
2548 except errors.ReservationError:
2549 raise errors.OpPrereqError("IP %s not available in network %s" %
2550 (new_ip, new_net_obj.name),
2551 errors.ECODE_NOTUNIQUE)
2552 # the new network is None, so check whether the new IP conflicts with an existing one
2553 elif self.op.conflicts_check:
2554 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2556 # release old IP if old network is not None
2557 if old_ip and old_net_uuid:
2559 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2560 except errors.AddressPoolError:
2561 logging.warning("Release IP %s not contained in network %s",
2562 old_ip, old_net_obj.name)
2564 # there are no changes in (ip, network) tuple and old network is not None
2565 elif (old_net_uuid is not None and
2566 (req_link is not None or req_mode is not None)):
2567 raise errors.OpPrereqError("Not allowed to change link or mode of"
2568 " a NIC that is connected to a network",
2571 private.params = new_params
2572 private.filled = new_filled_params
2574 def _PreCheckDiskTemplate(self, pnode_info):
2575 """CheckPrereq checks related to a new disk template."""
2576 # Arguments are passed to avoid configuration lookups
2577 pnode_uuid = self.instance.primary_node
2578 if self.instance.disk_template == self.op.disk_template:
2579 raise errors.OpPrereqError("Instance already has disk template %s" %
2580 self.instance.disk_template,
2583 if (self.instance.disk_template,
2584 self.op.disk_template) not in self._DISK_CONVERSIONS:
2585 raise errors.OpPrereqError("Unsupported disk template conversion from"
2586 " %s to %s" % (self.instance.disk_template,
2587 self.op.disk_template),
2589 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2590 msg="cannot change disk template")
2591 if self.op.disk_template in constants.DTS_INT_MIRROR:
2592 if self.op.remote_node_uuid == pnode_uuid:
2593 raise errors.OpPrereqError("Given new secondary node %s is the same"
2594 " as the primary node of the instance" %
2595 self.op.remote_node, errors.ECODE_STATE)
2596 CheckNodeOnline(self, self.op.remote_node_uuid)
2597 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2598 # FIXME: here we assume that the old instance type is DT_PLAIN
2599 assert self.instance.disk_template == constants.DT_PLAIN
2600 disks = [{constants.IDISK_SIZE: d.size,
2601 constants.IDISK_VG: d.logical_id[0]}
2602 for d in self.instance.disks]
2603 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2604 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2606 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2607 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2608 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2610 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2611 ignore=self.op.ignore_ipolicy)
2612 if pnode_info.group != snode_info.group:
2613 self.LogWarning("The primary and secondary nodes are in two"
2614 " different node groups; the disk parameters"
2615 " from the first disk's node group will be"
2618 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2619 # Make sure none of the nodes require exclusive storage
2620 nodes = [pnode_info]
2621 if self.op.disk_template in constants.DTS_INT_MIRROR:
2623 nodes.append(snode_info)
2624 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2625 if compat.any(map(has_es, nodes)):
2626 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2627 " storage is enabled" % (self.instance.disk_template,
2628 self.op.disk_template))
2629 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2631 def _PreCheckDisks(self, ispec):
2632 """CheckPrereq checks related to disk changes.
2635 @param ispec: instance specs to be updated with the new disks
2638 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2640 excl_stor = compat.any(
2641 rpc.GetExclusiveStorageForNodes(self.cfg,
2642 self.instance.all_nodes).values()
2645 # Check disk modifications. This is done here and not in CheckArguments
2646 # (as with NICs), because we need to know the instance's disk template
2647 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2648 if self.instance.disk_template == constants.DT_EXT:
2649 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2651 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2654 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2656 # Check the validity of the `provider' parameter
2657 if self.instance.disk_template == constants.DT_EXT:
2658 for mod in self.diskmod:
2659 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2660 if mod[0] == constants.DDM_ADD:
2661 if ext_provider is None:
2662 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2663 " '%s' missing, during disk add" %
2665 constants.IDISK_PROVIDER),
2667 elif mod[0] == constants.DDM_MODIFY:
2669 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2671 constants.IDISK_PROVIDER,
2674 for mod in self.diskmod:
2675 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2676 if ext_provider is not None:
2677 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2678 " instances of type '%s'" %
2679 (constants.IDISK_PROVIDER,
2683 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2684 raise errors.OpPrereqError("Disk operations not supported for"
2685 " diskless instances", errors.ECODE_INVAL)
2687 def _PrepareDiskMod(_, disk, params, __):
2688 disk.name = params.get(constants.IDISK_NAME, None)
2690 # Verify disk changes (operating on a copy)
2691 disks = copy.deepcopy(self.instance.disks)
2692 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2693 _PrepareDiskMod, None)
2694 utils.ValidateDeviceNames("disk", disks)
2695 if len(disks) > constants.MAX_DISKS:
2696 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2697 " more" % constants.MAX_DISKS,
2699 disk_sizes = [disk.size for disk in self.instance.disks]
2700 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2701 self.diskmod if op == constants.DDM_ADD)
2702 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2703 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2705 if self.op.offline is not None and self.op.offline:
2706 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2707 msg="can't change to offline")
2709 def CheckPrereq(self):
2710 """Check prerequisites.
2712 This checks the requested parameter changes against the instance's current state.
2715 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2716 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2717 self.cluster = self.cfg.GetClusterInfo()
2719 assert self.instance is not None, \
2720 "Cannot retrieve locked instance %s" % self.op.instance_name
2722 pnode_uuid = self.instance.primary_node
2726 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2728 # verify that the instance is not up
2729 instance_info = self.rpc.call_instance_info(
2730 pnode_uuid, self.instance.name, self.instance.hypervisor,
2731 self.instance.hvparams)
2732 if instance_info.fail_msg:
2733 self.warn.append("Can't get instance runtime information: %s" %
2734 instance_info.fail_msg)
2735 elif instance_info.payload:
2736 raise errors.OpPrereqError("Instance is still running on %s" %
2737 self.cfg.GetNodeName(pnode_uuid),
2740 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2741 node_uuids = list(self.instance.all_nodes)
2742 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2744 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2745 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2746 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2748 # dictionary with instance information after the modification
2751 # Prepare NIC modifications
2752 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2755 if self.op.os_name and not self.op.force:
2756 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2757 self.op.force_variant)
2758 instance_os = self.op.os_name
2760 instance_os = self.instance.os
2762 assert not (self.op.disk_template and self.op.disks), \
2763 "Can't modify disk template and apply disk changes at the same time"
2765 if self.op.disk_template:
2766 self._PreCheckDiskTemplate(pnode_info)
2768 self._PreCheckDisks(ispec)
2770 # hvparams processing
2771 if self.op.hvparams:
2772 hv_type = self.instance.hypervisor
2773 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2774 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2775 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2778 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2779 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2780 self.hv_proposed = self.hv_new = hv_new # the new actual values
2781 self.hv_inst = i_hvdict # the new dict (without defaults)
2783 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2785 self.instance.hvparams)
2786 self.hv_new = self.hv_inst = {}
2788 # beparams processing
2789 if self.op.beparams:
2790 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2792 objects.UpgradeBeParams(i_bedict)
2793 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2794 be_new = self.cluster.SimpleFillBE(i_bedict)
2795 self.be_proposed = self.be_new = be_new # the new actual values
2796 self.be_inst = i_bedict # the new dict (without defaults)
2798 self.be_new = self.be_inst = {}
2799 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2800 be_old = self.cluster.FillBE(self.instance)
2802 # CPU param validation -- checking every time a parameter is
2803 # changed, to cover all cases where either the CPU mask or the vcpus have changed
2805 if (constants.BE_VCPUS in self.be_proposed and
2806 constants.HV_CPU_MASK in self.hv_proposed):
2808 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2809 # Verify mask is consistent with number of vCPUs. Can skip this
2810 # test if only 1 entry in the CPU mask, which means same mask
2811 # is applied to all vCPUs.
2812 if (len(cpu_list) > 1 and
2813 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2814 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2816 (self.be_proposed[constants.BE_VCPUS],
2817 self.hv_proposed[constants.HV_CPU_MASK]),
2820 # Only perform this test if a new CPU mask is given
2821 if constants.HV_CPU_MASK in self.hv_new:
2822 # Calculate the largest CPU number requested
2823 max_requested_cpu = max(map(max, cpu_list))
2824 # Check that all of the instance's nodes have enough physical CPUs to
2825 # satisfy the requested CPU mask
2826 hvspecs = [(self.instance.hypervisor,
2827 self.cfg.GetClusterInfo()
2828 .hvparams[self.instance.hypervisor])]
2829 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2830 max_requested_cpu + 1, hvspecs)
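# Editor's note: a self-contained sketch (hypothetical, with mask parsing
# omitted) of the two rules enforced above: a multi-entry mask needs exactly
# one entry per vCPU, and the highest CPU number used anywhere in the mask
# requires every node to expose at least that many physical CPUs plus one,
# since CPU numbering is zero-based.
def _SketchCheckCpuMask(cpu_list, vcpus, node_cpu_totals):
  # cpu_list: one list of physical CPU numbers per vCPU, e.g. [[0, 1], [2]]
  if len(cpu_list) > 1 and len(cpu_list) != vcpus:
    raise ValueError("Mask has %d entries but instance has %d vCPUs" %
                     (len(cpu_list), vcpus))
  needed = max(max(entry) for entry in cpu_list) + 1
  for (node_name, num_cpus) in node_cpu_totals.items():
    if num_cpus < needed:
      raise ValueError("%s has %d physical CPUs, mask needs %d" %
                       (node_name, num_cpus, needed))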
2833 # osparams processing
2834 if self.op.osparams:
2835 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2836 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2837 self.os_inst = i_osdict # the new dict (without defaults)
2841 #TODO(dynmem): do the appropriate check involving MINMEM
2842 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2843 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2844 mem_check_list = [pnode_uuid]
2845 if be_new[constants.BE_AUTO_BALANCE]:
2846 # either we changed auto_balance to yes or it was from before
2847 mem_check_list.extend(self.instance.secondary_nodes)
2848 instance_info = self.rpc.call_instance_info(
2849 pnode_uuid, self.instance.name, self.instance.hypervisor,
2850 self.instance.hvparams)
2851 hvspecs = [(self.instance.hypervisor,
2852 self.cluster.hvparams[self.instance.hypervisor])]
2853 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2855 pninfo = nodeinfo[pnode_uuid]
2856 msg = pninfo.fail_msg
2858 # Assume the primary node is unreachable and go ahead
2859 self.warn.append("Can't get info from primary node %s: %s" %
2860 (self.cfg.GetNodeName(pnode_uuid), msg))
2862 (_, _, (pnhvinfo, )) = pninfo.payload
2863 if not isinstance(pnhvinfo.get("memory_free", None), int):
2864 self.warn.append("Node data from primary node %s doesn't contain"
2865 " free memory information" %
2866 self.cfg.GetNodeName(pnode_uuid))
2867 elif instance_info.fail_msg:
2868 self.warn.append("Can't get instance runtime information: %s" %
2869 instance_info.fail_msg)
2871 if instance_info.payload:
2872 current_mem = int(instance_info.payload["memory"])
2874 # Assume instance not running
2875 # (there is a slight race condition here, but it's not very
2876 # probable, and we have no other way to check)
2877 # TODO: Describe race condition
2879 #TODO(dynmem): do the appropriate check involving MINMEM
2880 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2881 pnhvinfo["memory_free"])
2883 raise errors.OpPrereqError("This change will prevent the instance"
2884 " from starting, due to %d MB of memory"
2885 " missing on its primary node" %
2886 miss_mem, errors.ECODE_NORES)
2888 if be_new[constants.BE_AUTO_BALANCE]:
2889 for node_uuid, nres in nodeinfo.items():
2890 if node_uuid not in self.instance.secondary_nodes:
2892 nres.Raise("Can't get info from secondary node %s" %
2893 self.cfg.GetNodeName(node_uuid), prereq=True,
2894 ecode=errors.ECODE_STATE)
2895 (_, _, (nhvinfo, )) = nres.payload
2896 if not isinstance(nhvinfo.get("memory_free", None), int):
2897 raise errors.OpPrereqError("Secondary node %s didn't return free"
2898 " memory information" %
2899 self.cfg.GetNodeName(node_uuid),
2901 #TODO(dynmem): do the appropriate check involving MINMEM
2902 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2903 raise errors.OpPrereqError("This change will prevent the instance"
2904 " from failover to its secondary node"
2905 " %s, due to not enough memory" %
2906 self.cfg.GetNodeName(node_uuid),
2909 if self.op.runtime_mem:
2910 remote_info = self.rpc.call_instance_info(
2911 self.instance.primary_node, self.instance.name,
2912 self.instance.hypervisor,
2913 self.cluster.hvparams[self.instance.hypervisor])
2914 remote_info.Raise("Error checking node %s" %
2915 self.cfg.GetNodeName(self.instance.primary_node))
2916 if not remote_info.payload: # not running already
2917 raise errors.OpPrereqError("Instance %s is not running" %
2918 self.instance.name, errors.ECODE_STATE)
2920 current_memory = remote_info.payload["memory"]
2921 if (not self.op.force and
2922 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2923 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2924 raise errors.OpPrereqError("Instance %s must have memory between %d"
2925 " and %d MB of memory unless --force is"
2927 (self.instance.name,
2928 self.be_proposed[constants.BE_MINMEM],
2929 self.be_proposed[constants.BE_MAXMEM]),
2932 delta = self.op.runtime_mem - current_memory
2934 CheckNodeFreeMemory(
2935 self, self.instance.primary_node,
2936 "ballooning memory for instance %s" % self.instance.name, delta,
2937 self.instance.hypervisor,
2938 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
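# Editor's note: the ballooning rule above as a hypothetical standalone
# check. Shrinking always succeeds; growing must fit the delta into the
# primary node's free memory, and without force the target must stay within
# the instance's [minmem, maxmem] backend parameters.
def _SketchCheckBalloon(runtime_mem, current_mem, minmem, maxmem,
                        node_free_mem, force=False):
  if not force and not (minmem <= runtime_mem <= maxmem):
    raise ValueError("Requested %d MB outside [%d, %d] MB" %
                     (runtime_mem, minmem, maxmem))
  delta = runtime_mem - current_mem
  if delta > 0 and delta > node_free_mem:
    raise ValueError("Need %d MB more but node has only %d MB free" %
                     (delta, node_free_mem))
  return delta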
2940 # make self.cluster visible in the functions below
2941 cluster = self.cluster
2943 def _PrepareNicCreate(_, params, private):
2944 self._PrepareNicModification(params, private, None, None,
2945 {}, cluster, pnode_uuid)
2948 def _PrepareNicMod(_, nic, params, private):
2949 self._PrepareNicModification(params, private, nic.ip, nic.network,
2950 nic.nicparams, cluster, pnode_uuid)
2953 def _PrepareNicRemove(_, params, __):
2955 net = params.network
2956 if net is not None and ip is not None:
2957 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2959 # Verify NIC changes (operating on a copy)
2960 nics = self.instance.nics[:]
2961 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2962 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2963 if len(nics) > constants.MAX_NICS:
2964 raise errors.OpPrereqError("Instance has too many network interfaces"
2965 " (%d), cannot add more" % constants.MAX_NICS,
2968 # Pre-compute NIC changes (necessary to use result in hooks)
2969 self._nic_chgdesc = []
2971 # Operate on copies as this is still in prereq
2972 nics = [nic.Copy() for nic in self.instance.nics]
2973 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2974 self._CreateNewNic, self._ApplyNicMods, None)
2975 # Verify that NIC names are unique and valid
2976 utils.ValidateDeviceNames("NIC", nics)
2977 self._new_nics = nics
2978 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2980 self._new_nics = None
2981 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
2983 if not self.op.ignore_ipolicy:
2984 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2987 # Fill ispec with backend parameters
2988 ispec[constants.ISPEC_SPINDLE_USE] = \
2989 self.be_new.get(constants.BE_SPINDLE_USE, None)
2990 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2993 # Copy ispec to verify parameters with min/max values separately
2994 if self.op.disk_template:
2995 new_disk_template = self.op.disk_template
2997 new_disk_template = self.instance.disk_template
2998 ispec_max = ispec.copy()
2999 ispec_max[constants.ISPEC_MEM_SIZE] = \
3000 self.be_new.get(constants.BE_MAXMEM, None)
3001 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3003 ispec_min = ispec.copy()
3004 ispec_min[constants.ISPEC_MEM_SIZE] = \
3005 self.be_new.get(constants.BE_MINMEM, None)
3006 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3009 if (res_max or res_min):
3010 # FIXME: Improve error message by including information about whether
3011 # the upper or lower limit of the parameter fails the ipolicy.
3012 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3013 (group_info, group_info.name,
3014 utils.CommaJoin(set(res_max + res_min))))
3015 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
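# Editor's note: a toy illustration (hypothetical check_fn) of why two ispec
# copies are verified above: with minmem != maxmem the instance must satisfy
# the group policy at both memory extremes, so the spec is checked once per
# extreme and the union of the violations is reported.
@staticmethod
def _SketchCheckBothExtremes(check_fn, ispec, minmem, maxmem):
  ispec_max = ispec.copy()
  ispec_max[constants.ISPEC_MEM_SIZE] = maxmem
  ispec_min = ispec.copy()
  ispec_min[constants.ISPEC_MEM_SIZE] = minmem
  # check_fn returns a list of violation descriptions for a single spec
  return set(check_fn(ispec_max) + check_fn(ispec_min))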
3017 def _ConvertPlainToDrbd(self, feedback_fn):
3018 """Converts an instance from plain to drbd.
3021 feedback_fn("Converting template to drbd")
3022 pnode_uuid = self.instance.primary_node
3023 snode_uuid = self.op.remote_node_uuid
3025 assert self.instance.disk_template == constants.DT_PLAIN
3027 # create a fake disk info for _GenerateDiskTemplate
3028 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3029 constants.IDISK_VG: d.logical_id[0],
3030 constants.IDISK_NAME: d.name}
3031 for d in self.instance.disks]
3032 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3033 self.instance.name, pnode_uuid,
3034 [snode_uuid], disk_info, None, None, 0,
3035 feedback_fn, self.diskparams)
3036 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3038 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3039 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3040 info = GetInstanceInfoText(self.instance)
3041 feedback_fn("Creating additional volumes...")
3042 # first, create the missing data and meta devices
3043 for disk in anno_disks:
3044 # unfortunately this is... not too nice
3045 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3046 info, True, p_excl_stor)
3047 for child in disk.children:
3048 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3050 # at this stage, all new LVs have been created, we can rename the old ones
3052 feedback_fn("Renaming original volumes...")
3053 rename_list = [(o, n.children[0].logical_id)
3054 for (o, n) in zip(self.instance.disks, new_disks)]
3055 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3056 result.Raise("Failed to rename original LVs")
3058 feedback_fn("Initializing DRBD devices...")
3059 # all child devices are in place, we can now create the DRBD devices
3061 for disk in anno_disks:
3062 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3063 (snode_uuid, s_excl_stor)]:
3064 f_create = node_uuid == pnode_uuid
3065 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3066 f_create, excl_stor)
3067 except errors.GenericError, e:
3068 feedback_fn("Initializing of DRBD devices failed;"
3069 " renaming back original volumes...")
3070 for disk in new_disks:
3071 self.cfg.SetDiskID(disk, pnode_uuid)
3072 rename_back_list = [(n.children[0], o.logical_id)
3073 for (n, o) in zip(new_disks, self.instance.disks)]
3074 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3075 result.Raise("Failed to rename LVs back after error %s" % str(e))
3078 # at this point, the instance has been modified
3079 self.instance.disk_template = constants.DT_DRBD8
3080 self.instance.disks = new_disks
3081 self.cfg.Update(self.instance, feedback_fn)
3083 # Release node locks while waiting for sync
3084 ReleaseLocks(self, locking.LEVEL_NODE)
3086 # disks are created, waiting for sync
3087 disk_abort = not WaitForSync(self, self.instance,
3088 oneshot=not self.op.wait_for_sync)
3090 raise errors.OpExecError("There are some degraded disks for"
3091 " this instance, please cleanup manually")
3093 # Node resource locks will be released by caller
3095 def _ConvertDrbdToPlain(self, feedback_fn):
3096 """Converts an instance from drbd to plain.
3099 assert len(self.instance.secondary_nodes) == 1
3100 assert self.instance.disk_template == constants.DT_DRBD8
3102 pnode_uuid = self.instance.primary_node
3103 snode_uuid = self.instance.secondary_nodes[0]
3104 feedback_fn("Converting template to plain")
3106 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3107 new_disks = [d.children[0] for d in self.instance.disks]
3109 # copy over size, mode and name
3110 for parent, child in zip(old_disks, new_disks):
3111 child.size = parent.size
3112 child.mode = parent.mode
3113 child.name = parent.name
3115 # this is a DRBD disk, return its port to the pool
3116 # NOTE: this must be done right before the call to cfg.Update!
3117 for disk in old_disks:
3118 tcp_port = disk.logical_id[2]
3119 self.cfg.AddTcpUdpPort(tcp_port)
3121 # update instance structure
3122 self.instance.disks = new_disks
3123 self.instance.disk_template = constants.DT_PLAIN
3124 _UpdateIvNames(0, self.instance.disks)
3125 self.cfg.Update(self.instance, feedback_fn)
3127 # Release locks in case removing disks takes a while
3128 ReleaseLocks(self, locking.LEVEL_NODE)
3130 feedback_fn("Removing volumes on the secondary node...")
3131 for disk in old_disks:
3132 self.cfg.SetDiskID(disk, snode_uuid)
3133 msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
3135 self.LogWarning("Could not remove block device %s on node %s,"
3136 " continuing anyway: %s", disk.iv_name,
3137 self.cfg.GetNodeName(snode_uuid), msg)
3139 feedback_fn("Removing unneeded volumes on the primary node...")
3140 for idx, disk in enumerate(old_disks):
3141 meta = disk.children[1]
3142 self.cfg.SetDiskID(meta, pnode_uuid)
3143 msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
3145 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3146 " continuing anyway: %s", idx,
3147 self.cfg.GetNodeName(pnode_uuid), msg)
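# Editor's note: the disk-tree surgery performed by the two converters
# above, sketched on hypothetical disk objects. A DRBD8 disk has two LV
# children (data, then metadata); converting to plain keeps child 0 and
# copies the user-visible attributes onto it, while the opposite direction
# wraps each LV as the first child of a freshly generated DRBD disk.
@staticmethod
def _SketchDrbdToPlain(drbd_disks):
  plain_disks = []
  for disk in drbd_disks:
    data_lv = disk.children[0]         # children[1] is the DRBD meta LV
    data_lv.size = disk.size
    data_lv.mode = disk.mode
    data_lv.name = disk.name
    plain_disks.append(data_lv)
  return plain_disks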
3149 def _CreateNewDisk(self, idx, params, _):
3150 """Creates a new disk.
3154 if self.instance.disk_template in constants.DTS_FILEBASED:
3155 (file_driver, file_path) = self.instance.disks[0].logical_id
3156 file_path = os.path.dirname(file_path)
3158 file_driver = file_path = None
3161 GenerateDiskTemplate(self, self.instance.disk_template,
3162 self.instance.name, self.instance.primary_node,
3163 self.instance.secondary_nodes, [params], file_path,
3164 file_driver, idx, self.Log, self.diskparams)[0]
3166 new_disks = CreateDisks(self, self.instance, disks=[disk])
3168 if self.cluster.prealloc_wipe_disks:
3170 WipeOrCleanupDisks(self, self.instance,
3171 disks=[(idx, disk, 0)],
3175 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3179 def _ModifyDisk(idx, disk, params, _):
3184 mode = params.get(constants.IDISK_MODE, None)
3187 changes.append(("disk.mode/%d" % idx, disk.mode))
3189 name = params.get(constants.IDISK_NAME, None)
3191 changes.append(("disk.name/%d" % idx, disk.name))
3195 def _RemoveDisk(self, idx, root, _):
3199 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3200 for node_uuid, disk in anno_disk.ComputeNodeTree(
3201 self.instance.primary_node):
3202 self.cfg.SetDiskID(disk, node_uuid)
3203 msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
3205 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3206 " continuing anyway", idx,
3207 self.cfg.GetNodeName(node_uuid), msg)
3209 # if this is a DRBD disk, return its port to the pool
3210 if root.dev_type in constants.LDS_DRBD:
3211 self.cfg.AddTcpUdpPort(root.logical_id[2])
3213 def _CreateNewNic(self, idx, params, private):
3214 """Creates data structure for a new network interface.
3217 mac = params[constants.INIC_MAC]
3218 ip = params.get(constants.INIC_IP, None)
3219 net = params.get(constants.INIC_NETWORK, None)
3220 name = params.get(constants.INIC_NAME, None)
3221 net_uuid = self.cfg.LookupNetwork(net)
3222 # TODO: should this not use private.filled? Can a NIC have no nicparams?
3223 nicparams = private.filled
3224 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3225 nicparams=nicparams)
3226 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3230 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3231 (mac, ip, private.filled[constants.NIC_MODE],
3232 private.filled[constants.NIC_LINK],
3236 def _ApplyNicMods(self, idx, nic, params, private):
3237 """Modifies a network interface.
3242 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3244 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3245 setattr(nic, key, params[key])
3247 new_net = params.get(constants.INIC_NETWORK, nic.network)
3248 new_net_uuid = self.cfg.LookupNetwork(new_net)
3249 if new_net_uuid != nic.network:
3250 changes.append(("nic.network/%d" % idx, new_net))
3251 nic.network = new_net_uuid
3254 nic.nicparams = private.filled
3256 for (key, val) in nic.nicparams.items():
3257 changes.append(("nic.%s/%d" % (key, idx), val))
3261 def Exec(self, feedback_fn):
3262 """Modifies an instance.
3264 All parameters take effect only at the next restart of the instance.
3267 # Process here the warnings from CheckPrereq, as we don't have a
3268 # feedback_fn there.
3269 # TODO: Replace with self.LogWarning
3270 for warn in self.warn:
3271 feedback_fn("WARNING: %s" % warn)
3273 assert ((self.op.disk_template is None) ^
3274 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3275 "Not owning any node resource locks"
3280 if self.op.pnode_uuid:
3281 self.instance.primary_node = self.op.pnode_uuid
3284 if self.op.runtime_mem:
3285 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3287 self.op.runtime_mem)
3288 rpcres.Raise("Cannot modify instance runtime memory")
3289 result.append(("runtime_memory", self.op.runtime_mem))
3291 # Apply disk changes
3292 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3293 self._CreateNewDisk, self._ModifyDisk,
3295 _UpdateIvNames(0, self.instance.disks)
3297 if self.op.disk_template:
3299 check_nodes = set(self.instance.all_nodes)
3300 if self.op.remote_node_uuid:
3301 check_nodes.add(self.op.remote_node_uuid)
3302 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3303 owned = self.owned_locks(level)
3304 assert not (check_nodes - owned), \
3305 ("Not owning the correct locks, owning %r, expected at least %r" %
3306 (owned, check_nodes))
3308 r_shut = ShutdownInstanceDisks(self, self.instance)
3310 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3311 " proceed with disk template conversion")
3312 mode = (self.instance.disk_template, self.op.disk_template)
3314 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3316 self.cfg.ReleaseDRBDMinors(self.instance.name)
3318 result.append(("disk_template", self.op.disk_template))
3320 assert self.instance.disk_template == self.op.disk_template, \
3321 ("Expected disk template '%s', found '%s'" %
3322 (self.op.disk_template, self.instance.disk_template))
3324 # Release node and resource locks if there are any (they might already have
3325 # been released during disk conversion)
3326 ReleaseLocks(self, locking.LEVEL_NODE)
3327 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3330 if self._new_nics is not None:
3331 self.instance.nics = self._new_nics
3332 result.extend(self._nic_chgdesc)
3335 if self.op.hvparams:
3336 self.instance.hvparams = self.hv_inst
3337 for key, val in self.op.hvparams.iteritems():
3338 result.append(("hv/%s" % key, val))
3341 if self.op.beparams:
3342 self.instance.beparams = self.be_inst
3343 for key, val in self.op.beparams.iteritems():
3344 result.append(("be/%s" % key, val))
3348 self.instance.os = self.op.os_name
3351 if self.op.osparams:
3352 self.instance.osparams = self.os_inst
3353 for key, val in self.op.osparams.iteritems():
3354 result.append(("os/%s" % key, val))
3356 if self.op.offline is None:
3359 elif self.op.offline:
3360 # Mark instance as offline
3361 self.cfg.MarkInstanceOffline(self.instance.name)
3362 result.append(("admin_state", constants.ADMINST_OFFLINE))
3364 # Mark instance as online, but stopped
3365 self.cfg.MarkInstanceDown(self.instance.name)
3366 result.append(("admin_state", constants.ADMINST_DOWN))
3368 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3370 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3371 self.owned_locks(locking.LEVEL_NODE)), \
3372 "All node locks should have been released by now"
3376 _DISK_CONVERSIONS = {
3377 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3378 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3382 class LUInstanceChangeGroup(LogicalUnit):
3383 HPATH = "instance-change-group"
3384 HTYPE = constants.HTYPE_INSTANCE
3387 def ExpandNames(self):
3388 self.share_locks = ShareAll()
3390 self.needed_locks = {
3391 locking.LEVEL_NODEGROUP: [],
3392 locking.LEVEL_NODE: [],
3393 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3396 self._ExpandAndLockInstance()
3398 if self.op.target_groups:
3399 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3400 self.op.target_groups)
3402 self.req_target_uuids = None
3404 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3406 def DeclareLocks(self, level):
3407 if level == locking.LEVEL_NODEGROUP:
3408 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3410 if self.req_target_uuids:
3411 lock_groups = set(self.req_target_uuids)
3413 # Lock all groups used by the instance optimistically; this requires going
3414 # via the node before it's locked, requiring verification later on
3415 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3416 lock_groups.update(instance_groups)
3418 # No target groups, need to lock all of them
3419 lock_groups = locking.ALL_SET
3421 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3423 elif level == locking.LEVEL_NODE:
3424 if self.req_target_uuids:
3425 # Lock all nodes used by instances
3426 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3427 self._LockInstancesNodes()
3429 # Lock all nodes in all potential target groups
3430 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3431 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3432 member_nodes = [node_uuid
3433 for group in lock_groups
3434 for node_uuid in self.cfg.GetNodeGroup(group).members]
3435 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3437 # Lock all nodes as all groups are potential targets
3438 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3440 def CheckPrereq(self):
3441 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3442 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3443 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3445 assert (self.req_target_uuids is None or
3446 owned_groups.issuperset(self.req_target_uuids))
3447 assert owned_instances == set([self.op.instance_name])
3449 # Get instance information
3450 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3452 # Check if node groups for locked instance are still correct
3453 assert owned_nodes.issuperset(self.instance.all_nodes), \
3454 ("Instance %s's nodes changed while we kept the lock" %
3455 self.op.instance_name)
3457 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3460 if self.req_target_uuids:
3461 # User requested specific target groups
3462 self.target_uuids = frozenset(self.req_target_uuids)
3464 # All groups except those used by the instance are potential targets
3465 self.target_uuids = owned_groups - inst_groups
3467 conflicting_groups = self.target_uuids & inst_groups
3468 if conflicting_groups:
3469 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3470 " used by the instance '%s'" %
3471 (utils.CommaJoin(conflicting_groups),
3472 self.op.instance_name),
3475 if not self.target_uuids:
3476 raise errors.OpPrereqError("There are no possible target groups",
3479 def BuildHooksEnv(self):
3483 assert self.target_uuids
3486 "TARGET_GROUPS": " ".join(self.target_uuids),
3489 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3493 def BuildHooksNodes(self):
3494 """Build hooks nodes.
3497 mn = self.cfg.GetMasterNode()
3500 def Exec(self, feedback_fn):
3501 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3503 assert instances == [self.op.instance_name], "Instance not locked"
3505 req = iallocator.IAReqGroupChange(instances=instances,
3506 target_groups=list(self.target_uuids))
3507 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3509 ial.Run(self.op.iallocator)
3512 raise errors.OpPrereqError("Can't compute solution for changing group of"
3513 " instance '%s' using iallocator '%s': %s" %
3514 (self.op.instance_name, self.op.iallocator,
3515 ial.info), errors.ECODE_NORES)
3517 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3519 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3520 " instance '%s'", len(jobs), self.op.instance_name)
3522 return ResultWithJobs(jobs)
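# Editor's note: a hypothetical reduction of the target-group selection done
# in CheckPrereq above, on plain sets of group UUIDs.
@staticmethod
def _SketchPickTargetGroups(owned_groups, inst_groups, requested):
  if requested:
    targets = frozenset(requested)
    conflicting = targets & inst_groups
    if conflicting:
      raise ValueError("Group(s) %s are already used by the instance" %
                       utils.CommaJoin(conflicting))
  else:
    # all locked groups except those the instance already lives in
    targets = frozenset(owned_groups) - inst_groups
  if not targets:
    raise ValueError("There are no possible target groups")
  return targets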