# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""
import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
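
# Illustrative behaviour (hypothetical names): if the resolver expands
# "inst1" to "inst1.example.com", _CheckHostnameSane(lu, "inst1") logs the
# expansion and returns the resolved hostname object, since "inst1" matches
# the first component of the FQDN; a non-matching name raises OpPrereqError.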


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)
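
# The request built here is consumed by LUInstanceCreate._RunAllocator below,
# which wraps it in an iallocator.IAllocator, runs the allocator script and
# reads the selected node names back from ial.result.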


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
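
# Example (hypothetical values): with cluster defaults containing
# {BE_MAXMEM: 1024} and op.beparams {BE_MAXMEM: "auto"}, the "auto" value is
# replaced by 1024 before the dict is type-checked and merged with the
# remaining cluster defaults by SimpleFillBE.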


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
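
# Sketch of the transformation above (hypothetical input): the opcode nic
# dict {"ip": "pool", "network": "net1"} yields an objects.NIC with a fresh
# UUID, an empty nicparams override (mode/link then come from the cluster or
# network parameters) and ip left as the literal "pool" marker, which
# CheckPrereq later replaces with an address from the network's pool.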


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
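
# instance_spec is keyed by the constants.ISPEC_* names used above, e.g.
# (hypothetical values) {ISPEC_MEM_SIZE: 1024, ISPEC_CPU_COUNT: 2,
# ISPEC_DISK_COUNT: 1, ISPEC_DISK_SIZE: [10240], ISPEC_NIC_COUNT: 1,
# ISPEC_SPINDLE_USE: 1}; see the ispec dict built in
# LUInstanceCreate.CheckPrereq.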


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
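
# OS variants are encoded in the OS name itself (e.g., hypothetically,
# "debootstrap+default"): objects.OS.GetVariant() extracts the part after
# the "+" separator, which is then validated against
# os_obj.supported_variants.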


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)),
                                 errors.ECODE_STATE)

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
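
    # LEVEL_NODE_RES deliberately mirrors LEVEL_NODE here: keeping a separate
    # copy lets CheckPrereq and Exec release the plain node locks and the
    # node resource locks independently (see the ReleaseLocks calls there).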

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
              d[constants.IDISK_MODE]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return (nl, nl)

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
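
    # Resulting layout (hypothetical values): with a cluster file storage dir
    # of "/srv/ganeti/file-storage", an opcode file_storage_dir of "mysubdir"
    # and instance name "inst1", the final path is
    # "/srv/ganeti/file-storage/mysubdir/inst1".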

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name
1758 # activate, get path, copy the data over
1759 for idx, disk in enumerate(instance.disks):
1760 self.LogInfo("Copying data for disk %d", idx)
1761 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1762 instance.name, True, idx)
1763 if result.fail_msg:
1764 self.LogWarning("Can't assemble newly created disk %d: %s",
1765 idx, result.fail_msg)
1766 errs.append(result.fail_msg)
1767 break
1768 dev_path = result.payload
1769 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1770 target_node, dev_path,
1771 cluster_name)
1772 if result.fail_msg:
1773 self.LogWarning("Can't copy data over for disk %d: %s",
1774 idx, result.fail_msg)
1775 errs.append(result.fail_msg)
1776 break
1778 if errs:
1779 self.LogWarning("Some disks failed to copy, aborting")
1781 RemoveDisks(self, instance, target_node=target_node)
1782 finally:
1783 self.cfg.ReleaseDRBDMinors(instance.name)
1784 raise errors.OpExecError("Errors during disk copy: %s" %
1785 (",".join(errs),))
1787 instance.primary_node = target_node
1788 self.cfg.Update(instance, feedback_fn)
1790 self.LogInfo("Removing the disks on the original node")
1791 RemoveDisks(self, instance, target_node=source_node)
1793 # Only start the instance if it's marked as up
1794 if instance.admin_state == constants.ADMINST_UP:
1795 self.LogInfo("Starting instance %s on node %s",
1796 instance.name, target_node)
1798 disks_ok, _ = AssembleInstanceDisks(self, instance,
1799 ignore_secondaries=True)
1800 if not disks_ok:
1801 ShutdownInstanceDisks(self, instance)
1802 raise errors.OpExecError("Can't activate the instance's disks")
1804 result = self.rpc.call_instance_start(target_node,
1805 (instance, None, None), False,
1806 self.op.reason)
1807 msg = result.fail_msg
1808 if msg:
1809 ShutdownInstanceDisks(self, instance)
1810 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1811 (instance.name, target_node, msg))
1814 class LUInstanceMultiAlloc(NoHooksLU):
1815 """Allocates multiple instances at the same time.
1820 def CheckArguments(self):
1821 """Check arguments.
1823 """
1824 nodes = []
1825 for inst in self.op.instances:
1826 if inst.iallocator is not None:
1827 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1828 " instance objects", errors.ECODE_INVAL)
1829 nodes.append(bool(inst.pnode))
1830 if inst.disk_template in constants.DTS_INT_MIRROR:
1831 nodes.append(bool(inst.snode))
1833 has_nodes = compat.any(nodes)
1834 if compat.all(nodes) ^ has_nodes:
1835 raise errors.OpPrereqError("There are instance objects providing"
1836 " pnode/snode while others do not",
1839 if self.op.iallocator is None:
1840 default_iallocator = self.cfg.GetDefaultIAllocator()
1841 if default_iallocator and has_nodes:
1842 self.op.iallocator = default_iallocator
1843 else:
1844 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1845 " given and no cluster-wide default"
1846 " iallocator found; please specify either"
1847 " an iallocator or nodes on the instances"
1848 " or set a cluster-wide default iallocator",
1851 _CheckOpportunisticLocking(self.op)
1853 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1854 if dups:
1855 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1856 utils.CommaJoin(dups), errors.ECODE_INVAL)
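# Illustrative note (added, not in the original source): utils.FindDuplicates
# returns the entries of a sequence that occur more than once, e.g.
#   utils.FindDuplicates(["inst1", "inst2", "inst1"]) -> ["inst1"]
# so the check above rejects a multi-allocation that names the same
# instance twice.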
1858 def ExpandNames(self):
1859 """Calculate the locks.
1862 self.share_locks = ShareAll()
1863 self.needed_locks = {
1864 # iallocator will select nodes and even if no iallocator is used,
1865 # collisions with LUInstanceCreate should be avoided
1866 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1867 }
1869 if self.op.iallocator:
1870 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1871 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1873 if self.op.opportunistic_locking:
1874 self.opportunistic_locks[locking.LEVEL_NODE] = True
1875 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1876 else:
1877 nodeslist = []
1878 for inst in self.op.instances:
1879 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1880 nodeslist.append(inst.pnode)
1881 if inst.snode is not None:
1882 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1883 nodeslist.append(inst.snode)
1885 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1886 # Lock resources of instance's primary and secondary nodes (copy to
1887 # prevent accidential modification)
1888 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1890 def CheckPrereq(self):
1891 """Check prerequisite.
1894 cluster = self.cfg.GetClusterInfo()
1895 default_vg = self.cfg.GetVGName()
1896 ec_id = self.proc.GetECId()
1898 if self.op.opportunistic_locking:
1899 # Only consider nodes for which a lock is held
1900 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1901 else:
1902 node_whitelist = None
1904 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1905 _ComputeNics(op, cluster, None,
1906 self.cfg, ec_id),
1907 _ComputeFullBeParams(op, cluster),
1908 node_whitelist)
1909 for op in self.op.instances]
1911 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1912 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1914 ial.Run(self.op.iallocator)
1916 if not ial.success:
1917 raise errors.OpPrereqError("Can't compute nodes using"
1918 " iallocator '%s': %s" %
1919 (self.op.iallocator, ial.info),
1920 errors.ECODE_NORES)
1922 self.ia_result = ial.result
1924 if self.op.dry_run:
1925 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1926 constants.JOB_IDS_KEY: [],
1927 })
1929 def _ConstructPartialResult(self):
1930 """Contructs the partial result.
1933 (allocatable, failed) = self.ia_result
1934 return {
1935 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1936 map(compat.fst, allocatable),
1937 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1938 }
1940 def Exec(self, feedback_fn):
1941 """Executes the opcode.
1944 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1945 (allocatable, failed) = self.ia_result
1947 jobs = []
1948 for (name, nodes) in allocatable:
1949 op = op2inst.pop(name)
1951 if len(nodes) > 1:
1952 (op.pnode, op.snode) = nodes
1953 else:
1954 (op.pnode,) = nodes
1956 jobs.append([op])
1958 missing = set(op2inst.keys()) - set(failed)
1959 assert not missing, \
1960 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1962 return ResultWithJobs(jobs, **self._ConstructPartialResult())
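# Illustrative sketch (added, not in the original source): the partial result
# merged into ResultWithJobs above is a plain dict keyed by the opcode's
# constants; for two allocatable instances and one failed one it would look
# roughly like:
#   {opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", "inst2"],
#    opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst3"]}
# with the job IDs for the submitted per-instance jobs added under
# constants.JOB_IDS_KEY by the ResultWithJobs machinery.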
1965 class _InstNicModPrivate:
1966 """Data structure for network interface modifications.
1968 Used by L{LUInstanceSetParams}.
1970 """
1971 def __init__(self):
1972 self.params = None
1973 self.filled = None
1976 def _PrepareContainerMods(mods, private_fn):
1977 """Prepares a list of container modifications by adding a private data field.
1979 @type mods: list of tuples; (operation, index, parameters)
1980 @param mods: List of modifications
1981 @type private_fn: callable or None
1982 @param private_fn: Callable for constructing a private data field for a
1983 modification; none if not needed
1984 @return: Copy of modification list with additional private data field
1986 """
1987 if private_fn is None:
1988 fn = lambda: None
1989 else:
1990 fn = private_fn
1992 return [(op, idx, params, fn()) for (op, idx, params) in mods]
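# Illustrative example (added, not part of the original code): given
# mods=[(constants.DDM_ADD, -1, {"size": 1024})] and a private_fn such as
# _InstNicModPrivate, the call
#   _PrepareContainerMods(mods, _InstNicModPrivate)
# returns [(constants.DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate>)],
# i.e. the same tuples with a freshly constructed private object appended;
# with private_fn=None the fourth element is simply None.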
1995 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
1996 """Checks if nodes have enough physical CPUs
1998 This function checks if all given nodes have the needed number of
1999 physical CPUs. In case any node has less CPUs or we cannot get the
2000 information from the node, this function raises an OpPrereqError
2001 exception.
2003 @type lu: C{LogicalUnit}
2004 @param lu: a logical unit from which we get configuration data
2005 @type nodenames: C{list}
2006 @param nodenames: the list of node names to check
2007 @type requested: C{int}
2008 @param requested: the minimum acceptable number of physical CPUs
2009 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2010 or we cannot check the node
2012 """
2013 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2014 for node in nodenames:
2015 info = nodeinfo[node]
2016 info.Raise("Cannot get current information from node %s" % node,
2017 prereq=True, ecode=errors.ECODE_ENVIRON)
2018 (_, _, (hv_info, )) = info.payload
2019 num_cpus = hv_info.get("cpu_total", None)
2020 if not isinstance(num_cpus, int):
2021 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2022 " on node %s, result was '%s'" %
2023 (node, num_cpus), errors.ECODE_ENVIRON)
2024 if requested > num_cpus:
2025 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2026 "required" % (node, num_cpus, requested),
2030 def GetItemFromContainer(identifier, kind, container):
2031 """Return the item refered by the identifier.
2033 @type identifier: string
2034 @param identifier: Item index or name or UUID
2035 @type kind: string
2036 @param kind: One-word item description
2037 @type container: list
2038 @param container: Container to get the item from
2040 """
2042 try:
2043 idx = int(identifier)
2044 if idx == -1:
2045 # Append
2046 absidx = len(container) - 1
2047 elif idx < 0:
2048 raise IndexError("Not accepting negative indices other than -1")
2049 elif idx > len(container):
2050 raise IndexError("Got %s index %s, but there are only %s" %
2051 (kind, idx, len(container)))
2052 else:
2053 absidx = idx
2054 return (absidx, container[idx])
2055 except ValueError:
2056 pass
2058 for idx, item in enumerate(container):
2059 if item.uuid == identifier or item.name == identifier:
2060 return (idx, item)
2062 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2063 (kind, identifier), errors.ECODE_NOENT)
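# Illustrative example (added, not in the original source): with a container
# of two NIC objects named "nic0" and "nic1",
#   GetItemFromContainer("1", "nic", nics)    -> (1, nics[1])   # by index
#   GetItemFromContainer("-1", "nic", nics)   -> (1, nics[-1])  # last item
#   GetItemFromContainer("nic0", "nic", nics) -> (0, nics[0])   # name/UUID
# and an unknown identifier raises OpPrereqError with errors.ECODE_NOENT.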
2066 def _ApplyContainerMods(kind, container, chgdesc, mods,
2067 create_fn, modify_fn, remove_fn):
2068 """Applies descriptions in C{mods} to C{container}.
2071 @param kind: One-word item description
2072 @type container: list
2073 @param container: Container to modify
2074 @type chgdesc: None or list
2075 @param chgdesc: List of applied changes
2076 @type mods: list
2077 @param mods: Modifications as returned by L{_PrepareContainerMods}
2078 @type create_fn: callable
2079 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2080 receives absolute item index, parameters and private data object as added
2081 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2082 as list
2083 @type modify_fn: callable
2084 @param modify_fn: Callback for modifying an existing item
2085 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2086 and private data object as added by L{_PrepareContainerMods}, returns
2087 changes as list
2088 @type remove_fn: callable
2089 @param remove_fn: Callback on removing item; receives absolute item index,
2090 item and private data object as added by L{_PrepareContainerMods}
2092 """
2093 for (op, identifier, params, private) in mods:
2094 changes = None
2096 if op == constants.DDM_ADD:
2097 # Calculate where item will be added
2098 # When adding an item, identifier can only be an index
2099 try:
2100 idx = int(identifier)
2101 except ValueError:
2102 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2103 " identifier for %s" % constants.DDM_ADD,
2106 addidx = len(container)
2107 else:
2108 if idx < 0:
2109 raise IndexError("Not accepting negative indices other than -1")
2110 elif idx > len(container):
2111 raise IndexError("Got %s index %s, but there are only %s" %
2112 (kind, idx, len(container)))
2113 addidx = idx
2115 if create_fn is None:
2116 item = params
2117 else:
2118 (item, changes) = create_fn(addidx, params, private)
2120 if idx == -1:
2121 container.append(item)
2122 else:
2124 assert idx <= len(container)
2125 # list.insert does so before the specified index
2126 container.insert(idx, item)
2127 else:
2128 # Retrieve existing item
2129 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2131 if op == constants.DDM_REMOVE:
2132 assert not params
2134 if remove_fn is not None:
2135 remove_fn(absidx, item, private)
2137 changes = [("%s/%s" % (kind, absidx), "remove")]
2139 assert container[absidx] == item
2140 del container[absidx]
2141 elif op == constants.DDM_MODIFY:
2142 if modify_fn is not None:
2143 changes = modify_fn(absidx, item, params, private)
2144 else:
2145 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2147 assert _TApplyContModsCbChanges(changes)
2149 if not (chgdesc is None or changes is None):
2150 chgdesc.extend(changes)
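# Illustrative usage sketch (added, assumed values, not part of the original
# code):
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "nic1", {})], None)
#   _ApplyContainerMods("nic", nics, chgdesc, mods, None, None, None)
# deletes the NIC identified by "nic1" from the nics container and leaves
# chgdesc == [("nic/1", "remove")]; create_fn/modify_fn/remove_fn hook the
# corresponding DDM_ADD/DDM_MODIFY/DDM_REMOVE operations documented above.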
2153 def _UpdateIvNames(base_index, disks):
2154 """Updates the C{iv_name} attribute of disks.
2156 @type disks: list of L{objects.Disk}
2158 """
2159 for (idx, disk) in enumerate(disks):
2160 disk.iv_name = "disk/%s" % (base_index + idx, )
2163 class LUInstanceSetParams(LogicalUnit):
2164 """Modifies an instances's parameters.
2167 HPATH = "instance-modify"
2168 HTYPE = constants.HTYPE_INSTANCE
2169 REQ_BGL = False
2171 @staticmethod
2172 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2173 assert ht.TList(mods)
2174 assert not mods or len(mods[0]) in (2, 3)
2176 if mods and len(mods[0]) == 2:
2177 result = []
2179 addremove = 0
2180 for op, params in mods:
2181 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2182 result.append((op, -1, params))
2183 addremove += 1
2185 if addremove > 1:
2186 raise errors.OpPrereqError("Only one %s add or remove operation is"
2187 " supported at a time" % kind,
2190 result.append((constants.DDM_MODIFY, op, params))
2192 assert verify_fn(result)
2193 else:
2194 result = mods
2196 return result
2198 @staticmethod
2199 def _CheckMods(kind, mods, key_types, item_fn):
2200 """Ensures requested disk/NIC modifications are valid.
2203 for (op, _, params) in mods:
2204 assert ht.TDict(params)
2206 # If 'key_types' is an empty dict, we assume we have an
2207 # 'ext' template and thus do not ForceDictType
2208 if key_types:
2209 utils.ForceDictType(params, key_types)
2211 if op == constants.DDM_REMOVE:
2212 if params:
2213 raise errors.OpPrereqError("No settings should be passed when"
2214 " removing a %s" % kind,
2216 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2217 item_fn(op, params)
2218 else:
2219 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2221 @staticmethod
2222 def _VerifyDiskModification(op, params, excl_stor):
2223 """Verifies a disk modification.
2226 if op == constants.DDM_ADD:
2227 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2228 if mode not in constants.DISK_ACCESS_SET:
2229 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2232 size = params.get(constants.IDISK_SIZE, None)
2233 if size is None:
2234 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2235 constants.IDISK_SIZE, errors.ECODE_INVAL)
2237 try:
2238 size = int(size)
2239 except (TypeError, ValueError), err:
2240 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2243 params[constants.IDISK_SIZE] = size
2244 name = params.get(constants.IDISK_NAME, None)
2245 if name is not None and name.lower() == constants.VALUE_NONE:
2246 params[constants.IDISK_NAME] = None
2248 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2250 elif op == constants.DDM_MODIFY:
2251 if constants.IDISK_SIZE in params:
2252 raise errors.OpPrereqError("Disk size change not possible, use"
2253 " grow-disk", errors.ECODE_INVAL)
2255 raise errors.OpPrereqError("Disk modification doesn't support"
2256 " additional arbitrary parameters",
2258 name = params.get(constants.IDISK_NAME, None)
2259 if name is not None and name.lower() == constants.VALUE_NONE:
2260 params[constants.IDISK_NAME] = None
2262 @staticmethod
2263 def _VerifyNicModification(op, params):
2264 """Verifies a network interface modification.
2267 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2268 ip = params.get(constants.INIC_IP, None)
2269 name = params.get(constants.INIC_NAME, None)
2270 req_net = params.get(constants.INIC_NETWORK, None)
2271 link = params.get(constants.NIC_LINK, None)
2272 mode = params.get(constants.NIC_MODE, None)
2273 if name is not None and name.lower() == constants.VALUE_NONE:
2274 params[constants.INIC_NAME] = None
2275 if req_net is not None:
2276 if req_net.lower() == constants.VALUE_NONE:
2277 params[constants.INIC_NETWORK] = None
2278 req_net = None
2279 elif link is not None or mode is not None:
2280 raise errors.OpPrereqError("If network is given"
2281 " mode or link should not",
2284 if op == constants.DDM_ADD:
2285 macaddr = params.get(constants.INIC_MAC, None)
2286 if macaddr is None:
2287 params[constants.INIC_MAC] = constants.VALUE_AUTO
2289 if ip is not None:
2290 if ip.lower() == constants.VALUE_NONE:
2291 params[constants.INIC_IP] = None
2292 else:
2293 if ip.lower() == constants.NIC_IP_POOL:
2294 if op == constants.DDM_ADD and req_net is None:
2295 raise errors.OpPrereqError("If ip=pool, parameter network"
2299 if not netutils.IPAddress.IsValid(ip):
2300 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2303 if constants.INIC_MAC in params:
2304 macaddr = params[constants.INIC_MAC]
2305 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2306 macaddr = utils.NormalizeAndValidateMac(macaddr)
2308 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2309 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2310 " modifying an existing NIC",
2313 def CheckArguments(self):
2314 if not (self.op.nics or self.op.disks or self.op.disk_template or
2315 self.op.hvparams or self.op.beparams or self.op.os_name or
2316 self.op.offline is not None or self.op.runtime_mem or
2317 self.op.osparams):
2318 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2320 if self.op.hvparams:
2321 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2322 "hypervisor", "instance", "cluster")
2324 self.op.disks = self._UpgradeDiskNicMods(
2325 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2326 self.op.nics = self._UpgradeDiskNicMods(
2327 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2329 if self.op.disks and self.op.disk_template is not None:
2330 raise errors.OpPrereqError("Disk template conversion and other disk"
2331 " changes not supported at the same time",
2334 if (self.op.disk_template and
2335 self.op.disk_template in constants.DTS_INT_MIRROR and
2336 self.op.remote_node is None):
2337 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2338 " one requires specifying a secondary node",
2341 # Check NIC modifications
2342 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2343 self._VerifyNicModification)
2345 if self.op.pnode:
2346 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2348 def ExpandNames(self):
2349 self._ExpandAndLockInstance()
2350 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2351 # Can't even acquire node locks in shared mode as upcoming changes in
2352 # Ganeti 2.6 will start to modify the node object on disk conversion
2353 self.needed_locks[locking.LEVEL_NODE] = []
2354 self.needed_locks[locking.LEVEL_NODE_RES] = []
2355 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2356 # Lock the node group to look up the ipolicy
2357 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2359 def DeclareLocks(self, level):
2360 if level == locking.LEVEL_NODEGROUP:
2361 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2362 # Acquire locks for the instance's nodegroups optimistically. Needs
2363 # to be verified in CheckPrereq
2364 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2365 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2366 elif level == locking.LEVEL_NODE:
2367 self._LockInstancesNodes()
2368 if self.op.disk_template and self.op.remote_node:
2369 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2370 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2371 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2372 # Copy node locks
2373 self.needed_locks[locking.LEVEL_NODE_RES] = \
2374 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2376 def BuildHooksEnv(self):
2377 """Build hooks env.
2379 This runs on the master, primary and secondaries.
2381 """
2382 args = {}
2383 if constants.BE_MINMEM in self.be_new:
2384 args["minmem"] = self.be_new[constants.BE_MINMEM]
2385 if constants.BE_MAXMEM in self.be_new:
2386 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2387 if constants.BE_VCPUS in self.be_new:
2388 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2389 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2390 # information at all.
2392 if self._new_nics is not None:
2393 nics = []
2395 for nic in self._new_nics:
2396 n = copy.deepcopy(nic)
2397 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2398 n.nicparams = nicparams
2399 nics.append(NICToTuple(self, n))
2401 args["nics"] = nics
2403 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2404 if self.op.disk_template:
2405 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2406 if self.op.runtime_mem:
2407 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2411 def BuildHooksNodes(self):
2412 """Build hooks nodes.
2415 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2416 return (nl, nl)
2418 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2419 old_params, cluster, pnode):
2421 update_params_dict = dict([(key, params[key])
2422 for key in constants.NICS_PARAMETERS
2423 if key in params])
2425 req_link = update_params_dict.get(constants.NIC_LINK, None)
2426 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2428 new_net_uuid = None
2429 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2430 if new_net_uuid_or_name:
2431 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2432 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2434 if old_net_uuid:
2435 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2437 if new_net_uuid:
2438 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2439 if not netparams:
2440 raise errors.OpPrereqError("No netparams found for the network"
2441 " %s, probably not connected" %
2442 new_net_obj.name, errors.ECODE_INVAL)
2443 new_params = dict(netparams)
2444 else:
2445 new_params = GetUpdatedParams(old_params, update_params_dict)
2447 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2449 new_filled_params = cluster.SimpleFillNIC(new_params)
2450 objects.NIC.CheckParameterSyntax(new_filled_params)
2452 new_mode = new_filled_params[constants.NIC_MODE]
2453 if new_mode == constants.NIC_MODE_BRIDGED:
2454 bridge = new_filled_params[constants.NIC_LINK]
2455 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2456 if msg:
2457 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2459 self.warn.append(msg)
2460 else:
2461 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2463 elif new_mode == constants.NIC_MODE_ROUTED:
2464 ip = params.get(constants.INIC_IP, old_ip)
2465 if ip is None:
2466 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2467 " on a routed NIC", errors.ECODE_INVAL)
2469 elif new_mode == constants.NIC_MODE_OVS:
2470 # TODO: check OVS link
2471 self.LogInfo("OVS links are currently not checked for correctness")
2473 if constants.INIC_MAC in params:
2474 mac = params[constants.INIC_MAC]
2475 if mac is None:
2476 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2478 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2479 # otherwise generate the MAC address
2480 params[constants.INIC_MAC] = \
2481 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2482 else:
2483 # or validate/reserve the current one
2484 try:
2485 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2486 except errors.ReservationError:
2487 raise errors.OpPrereqError("MAC address '%s' already in use"
2488 " in cluster" % mac,
2489 errors.ECODE_NOTUNIQUE)
2490 elif new_net_uuid != old_net_uuid:
2492 def get_net_prefix(net_uuid):
2493 mac_prefix = None
2494 if net_uuid:
2495 nobj = self.cfg.GetNetwork(net_uuid)
2496 mac_prefix = nobj.mac_prefix
2498 return mac_prefix
2500 new_prefix = get_net_prefix(new_net_uuid)
2501 old_prefix = get_net_prefix(old_net_uuid)
2502 if old_prefix != new_prefix:
2503 params[constants.INIC_MAC] = \
2504 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2506 # if there is a change in (ip, network) tuple
2507 new_ip = params.get(constants.INIC_IP, old_ip)
2508 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2509 if new_ip:
2510 # if IP is pool then require a network and generate one IP
2511 if new_ip.lower() == constants.NIC_IP_POOL:
2512 if new_net_uuid:
2513 try:
2514 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2515 except errors.ReservationError:
2516 raise errors.OpPrereqError("Unable to get a free IP"
2517 " from the address pool",
2519 self.LogInfo("Chose IP %s from network %s",
2522 params[constants.INIC_IP] = new_ip
2523 else:
2524 raise errors.OpPrereqError("ip=pool, but no network found",
2526 # Reserve new IP if in the new network if any
2527 elif new_net_uuid:
2528 try:
2529 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2530 self.LogInfo("Reserving IP %s in network %s",
2531 new_ip, new_net_obj.name)
2532 except errors.ReservationError:
2533 raise errors.OpPrereqError("IP %s not available in network %s" %
2534 (new_ip, new_net_obj.name),
2535 errors.ECODE_NOTUNIQUE)
2536 # new network is None so check if new IP is a conflicting IP
2537 elif self.op.conflicts_check:
2538 _CheckForConflictingIp(self, new_ip, pnode)
2540 # release old IP if old network is not None
2541 if old_ip and old_net_uuid:
2542 try:
2543 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2544 except errors.AddressPoolError:
2545 logging.warning("Release IP %s not contained in network %s",
2546 old_ip, old_net_obj.name)
2548 # there are no changes in (ip, network) tuple and old network is not None
2549 elif (old_net_uuid is not None and
2550 (req_link is not None or req_mode is not None)):
2551 raise errors.OpPrereqError("Not allowed to change link or mode of"
2552 " a NIC that is connected to a network",
2555 private.params = new_params
2556 private.filled = new_filled_params
2558 def _PreCheckDiskTemplate(self, pnode_info):
2559 """CheckPrereq checks related to a new disk template."""
2560 # Arguments are passed to avoid configuration lookups
2561 instance = self.instance
2562 pnode = instance.primary_node
2563 cluster = self.cluster
2564 if instance.disk_template == self.op.disk_template:
2565 raise errors.OpPrereqError("Instance already has disk template %s" %
2566 instance.disk_template, errors.ECODE_INVAL)
2568 if (instance.disk_template,
2569 self.op.disk_template) not in self._DISK_CONVERSIONS:
2570 raise errors.OpPrereqError("Unsupported disk template conversion from"
2571 " %s to %s" % (instance.disk_template,
2572 self.op.disk_template),
2573 errors.ECODE_INVAL)
2574 CheckInstanceState(self, instance, INSTANCE_DOWN,
2575 msg="cannot change disk template")
2576 if self.op.disk_template in constants.DTS_INT_MIRROR:
2577 if self.op.remote_node == pnode:
2578 raise errors.OpPrereqError("Given new secondary node %s is the same"
2579 " as the primary node of the instance" %
2580 self.op.remote_node, errors.ECODE_STATE)
2581 CheckNodeOnline(self, self.op.remote_node)
2582 CheckNodeNotDrained(self, self.op.remote_node)
2583 # FIXME: here we assume that the old instance type is DT_PLAIN
2584 assert instance.disk_template == constants.DT_PLAIN
2585 disks = [{constants.IDISK_SIZE: d.size,
2586 constants.IDISK_VG: d.logical_id[0]}
2587 for d in instance.disks]
2588 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2589 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2591 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2592 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2593 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2594 snode_group)
2595 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2596 ignore=self.op.ignore_ipolicy)
2597 if pnode_info.group != snode_info.group:
2598 self.LogWarning("The primary and secondary nodes are in two"
2599 " different node groups; the disk parameters"
2600 " from the first disk's node group will be"
2603 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2604 # Make sure none of the nodes require exclusive storage
2605 nodes = [pnode_info]
2606 if self.op.disk_template in constants.DTS_INT_MIRROR:
2607 assert snode_info
2608 nodes.append(snode_info)
2609 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2610 if compat.any(map(has_es, nodes)):
2611 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2612 " storage is enabled" % (instance.disk_template,
2613 self.op.disk_template))
2614 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2616 def _PreCheckDisks(self, ispec):
2617 """CheckPrereq checks related to disk changes.
2620 @param ispec: instance specs to be updated with the new disks
2622 """
2623 instance = self.instance
2624 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2626 excl_stor = compat.any(
2627 rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
2628 )
2630 # Check disk modifications. This is done here and not in CheckArguments
2631 # (as with NICs), because we need to know the instance's disk template
2632 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2633 if instance.disk_template == constants.DT_EXT:
2634 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2636 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2639 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2641 # Check the validity of the `provider' parameter
2642 if instance.disk_template == constants.DT_EXT:
2643 for mod in self.diskmod:
2644 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2645 if mod[0] == constants.DDM_ADD:
2646 if ext_provider is None:
2647 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2648 " '%s' missing, during disk add" %
2650 constants.IDISK_PROVIDER),
2651 errors.ECODE_NOENT)
2652 elif mod[0] == constants.DDM_MODIFY:
2653 if ext_provider:
2654 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2656 constants.IDISK_PROVIDER,
2657 errors.ECODE_INVAL)
2658 else:
2659 for mod in self.diskmod:
2660 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2661 if ext_provider is not None:
2662 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2663 " instances of type '%s'" %
2664 (constants.IDISK_PROVIDER,
2665 constants.DT_EXT),
2666 errors.ECODE_INVAL)
2668 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2669 raise errors.OpPrereqError("Disk operations not supported for"
2670 " diskless instances", errors.ECODE_INVAL)
2672 def _PrepareDiskMod(_, disk, params, __):
2673 disk.name = params.get(constants.IDISK_NAME, None)
2675 # Verify disk changes (operating on a copy)
2676 disks = copy.deepcopy(instance.disks)
2677 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2678 _PrepareDiskMod, None)
2679 utils.ValidateDeviceNames("disk", disks)
2680 if len(disks) > constants.MAX_DISKS:
2681 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2682 " more" % constants.MAX_DISKS,
2684 disk_sizes = [disk.size for disk in instance.disks]
2685 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2686 self.diskmod if op == constants.DDM_ADD)
2687 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2688 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2690 if self.op.offline is not None and self.op.offline:
2691 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2692 msg="can't change to offline")
2694 def CheckPrereq(self):
2695 """Check prerequisites.
2697 This only checks the instance list against the existing names.
2699 """
2700 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2701 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2703 cluster = self.cluster = self.cfg.GetClusterInfo()
2704 assert self.instance is not None, \
2705 "Cannot retrieve locked instance %s" % self.op.instance_name
2707 pnode = instance.primary_node
2709 self.warn = []
2711 if (self.op.pnode is not None and self.op.pnode != pnode and
2712 not self.op.force):
2713 # verify that the instance is not up
2714 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2715 instance.hypervisor)
2716 if instance_info.fail_msg:
2717 self.warn.append("Can't get instance runtime information: %s" %
2718 instance_info.fail_msg)
2719 elif instance_info.payload:
2720 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2723 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2724 nodelist = list(instance.all_nodes)
2725 pnode_info = self.cfg.GetNodeInfo(pnode)
2727 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2728 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2729 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2731 # dictionary with instance information after the modification
2732 ispec = {}
2734 # Prepare NIC modifications
2735 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2738 if self.op.os_name and not self.op.force:
2739 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2740 self.op.force_variant)
2741 instance_os = self.op.os_name
2742 else:
2743 instance_os = instance.os
2745 assert not (self.op.disk_template and self.op.disks), \
2746 "Can't modify disk template and apply disk changes at the same time"
2748 if self.op.disk_template:
2749 self._PreCheckDiskTemplate(pnode_info)
2751 self._PreCheckDisks(ispec)
2753 # hvparams processing
2754 if self.op.hvparams:
2755 hv_type = instance.hypervisor
2756 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2757 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2758 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2760 # local check
2761 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2762 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2763 self.hv_proposed = self.hv_new = hv_new # the new actual values
2764 self.hv_inst = i_hvdict # the new dict (without defaults)
2765 else:
2766 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2767 instance.hvparams)
2768 self.hv_new = self.hv_inst = {}
2770 # beparams processing
2771 if self.op.beparams:
2772 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2773 use_none=True)
2774 objects.UpgradeBeParams(i_bedict)
2775 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2776 be_new = cluster.SimpleFillBE(i_bedict)
2777 self.be_proposed = self.be_new = be_new # the new actual values
2778 self.be_inst = i_bedict # the new dict (without defaults)
2779 else:
2780 self.be_new = self.be_inst = {}
2781 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2782 be_old = cluster.FillBE(instance)
2784 # CPU param validation -- checking every time a parameter is
2785 # changed to cover all cases where either CPU mask or vcpus have
2786 # been changed
2787 if (constants.BE_VCPUS in self.be_proposed and
2788 constants.HV_CPU_MASK in self.hv_proposed):
2789 cpu_list = \
2790 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2791 # Verify mask is consistent with number of vCPUs. Can skip this
2792 # test if only 1 entry in the CPU mask, which means same mask
2793 # is applied to all vCPUs.
2794 if (len(cpu_list) > 1 and
2795 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2796 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2798 (self.be_proposed[constants.BE_VCPUS],
2799 self.hv_proposed[constants.HV_CPU_MASK]),
2800 errors.ECODE_INVAL)
2802 # Only perform this test if a new CPU mask is given
2803 if constants.HV_CPU_MASK in self.hv_new:
2804 # Calculate the largest CPU number requested
2805 max_requested_cpu = max(map(max, cpu_list))
2806 # Check that all of the instance's nodes have enough physical CPUs to
2807 # satisfy the requested CPU mask
2808 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2809 max_requested_cpu + 1, instance.hypervisor)
2811 # osparams processing
2812 if self.op.osparams:
2813 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2814 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2815 self.os_inst = i_osdict # the new dict (without defaults)
2816 else:
2817 self.os_inst = {}
2819 #TODO(dynmem): do the appropriate check involving MINMEM
2820 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2821 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2822 mem_check_list = [pnode]
2823 if be_new[constants.BE_AUTO_BALANCE]:
2824 # either we changed auto_balance to yes or it was from before
2825 mem_check_list.extend(instance.secondary_nodes)
2826 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2827 instance.hypervisor)
2828 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2829 [instance.hypervisor], False)
2830 pninfo = nodeinfo[pnode]
2831 msg = pninfo.fail_msg
2832 if msg:
2833 # Assume the primary node is unreachable and go ahead
2834 self.warn.append("Can't get info from primary node %s: %s" %
2837 (_, _, (pnhvinfo, )) = pninfo.payload
2838 if not isinstance(pnhvinfo.get("memory_free", None), int):
2839 self.warn.append("Node data from primary node %s doesn't contain"
2840 " free memory information" % pnode)
2841 elif instance_info.fail_msg:
2842 self.warn.append("Can't get instance runtime information: %s" %
2843 instance_info.fail_msg)
2844 else:
2845 if instance_info.payload:
2846 current_mem = int(instance_info.payload["memory"])
2848 # Assume instance not running
2849 # (there is a slight race condition here, but it's not very
2850 # probable, and we have no other way to check)
2851 # TODO: Describe race condition
2852 current_mem = 0
2853 #TODO(dynmem): do the appropriate check involving MINMEM
2854 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2855 pnhvinfo["memory_free"])
2857 raise errors.OpPrereqError("This change will prevent the instance"
2858 " from starting, due to %d MB of memory"
2859 " missing on its primary node" %
2860 miss_mem, errors.ECODE_NORES)
2862 if be_new[constants.BE_AUTO_BALANCE]:
2863 for node, nres in nodeinfo.items():
2864 if node not in instance.secondary_nodes:
2865 continue
2866 nres.Raise("Can't get info from secondary node %s" % node,
2867 prereq=True, ecode=errors.ECODE_STATE)
2868 (_, _, (nhvinfo, )) = nres.payload
2869 if not isinstance(nhvinfo.get("memory_free", None), int):
2870 raise errors.OpPrereqError("Secondary node %s didn't return free"
2871 " memory information" % node,
2873 #TODO(dynmem): do the appropriate check involving MINMEM
2874 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2875 raise errors.OpPrereqError("This change will prevent the instance"
2876 " from failover to its secondary node"
2877 " %s, due to not enough memory" % node,
2880 if self.op.runtime_mem:
2881 remote_info = self.rpc.call_instance_info(instance.primary_node,
2882 instance.name,
2883 instance.hypervisor)
2884 remote_info.Raise("Error checking node %s" % instance.primary_node)
2885 if not remote_info.payload: # not running already
2886 raise errors.OpPrereqError("Instance %s is not running" %
2887 instance.name, errors.ECODE_STATE)
2889 current_memory = remote_info.payload["memory"]
2890 if (not self.op.force and
2891 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2892 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2893 raise errors.OpPrereqError("Instance %s must have memory between %d"
2894 " and %d MB of memory unless --force is"
2897 self.be_proposed[constants.BE_MINMEM],
2898 self.be_proposed[constants.BE_MAXMEM]),
2899 errors.ECODE_INVAL)
2901 delta = self.op.runtime_mem - current_memory
2902 if delta > 0:
2903 CheckNodeFreeMemory(self, instance.primary_node,
2904 "ballooning memory for instance %s" %
2905 instance.name, delta, instance.hypervisor)
2907 def _PrepareNicCreate(_, params, private):
2908 self._PrepareNicModification(params, private, None, None,
2909 None, cluster, pnode)
2910 return (None, None)
2912 def _PrepareNicMod(_, nic, params, private):
2913 self._PrepareNicModification(params, private, nic.ip, nic.network,
2914 nic.nicparams, cluster, pnode)
2915 return None
2917 def _PrepareNicRemove(_, params, __):
2918 ip = params.ip
2919 net = params.network
2920 if net is not None and ip is not None:
2921 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2923 # Verify NIC changes (operating on copy)
2924 nics = instance.nics[:]
2925 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2926 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2927 if len(nics) > constants.MAX_NICS:
2928 raise errors.OpPrereqError("Instance has too many network interfaces"
2929 " (%d), cannot add more" % constants.MAX_NICS,
2932 # Pre-compute NIC changes (necessary to use result in hooks)
2933 self._nic_chgdesc = []
2934 if self.nicmod:
2935 # Operate on copies as this is still in prereq
2936 nics = [nic.Copy() for nic in instance.nics]
2937 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2938 self._CreateNewNic, self._ApplyNicMods, None)
2939 # Verify that NIC names are unique and valid
2940 utils.ValidateDeviceNames("NIC", nics)
2941 self._new_nics = nics
2942 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2943 else:
2944 self._new_nics = None
2945 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2947 if not self.op.ignore_ipolicy:
2948 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2949 group_info)
2951 # Fill ispec with backend parameters
2952 ispec[constants.ISPEC_SPINDLE_USE] = \
2953 self.be_new.get(constants.BE_SPINDLE_USE, None)
2954 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2955 None)
2957 # Copy ispec to verify parameters with min/max values separately
2958 if self.op.disk_template:
2959 new_disk_template = self.op.disk_template
2960 else:
2961 new_disk_template = instance.disk_template
2962 ispec_max = ispec.copy()
2963 ispec_max[constants.ISPEC_MEM_SIZE] = \
2964 self.be_new.get(constants.BE_MAXMEM, None)
2965 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2966 new_disk_template)
2967 ispec_min = ispec.copy()
2968 ispec_min[constants.ISPEC_MEM_SIZE] = \
2969 self.be_new.get(constants.BE_MINMEM, None)
2970 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2971 new_disk_template)
2973 if (res_max or res_min):
2974 # FIXME: Improve error message by including information about whether
2975 # the upper or lower limit of the parameter fails the ipolicy.
2976 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2977 (group_info, group_info.name,
2978 utils.CommaJoin(set(res_max + res_min))))
2979 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2981 def _ConvertPlainToDrbd(self, feedback_fn):
2982 """Converts an instance from plain to drbd.
2985 feedback_fn("Converting template to drbd")
2986 instance = self.instance
2987 pnode = instance.primary_node
2988 snode = self.op.remote_node
2990 assert instance.disk_template == constants.DT_PLAIN
2992 # create a fake disk info for _GenerateDiskTemplate
2993 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2994 constants.IDISK_VG: d.logical_id[0],
2995 constants.IDISK_NAME: d.name}
2996 for d in instance.disks]
2997 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2998 instance.name, pnode, [snode],
2999 disk_info, None, None, 0, feedback_fn,
3000 self.diskparams)
3001 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3002 self.diskparams)
3003 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3004 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3005 info = GetInstanceInfoText(instance)
3006 feedback_fn("Creating additional volumes...")
3007 # first, create the missing data and meta devices
3008 for disk in anno_disks:
3009 # unfortunately this is... not too nice
3010 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3011 info, True, p_excl_stor)
3012 for child in disk.children:
3013 CreateSingleBlockDev(self, snode, instance, child, info, True,
3014 s_excl_stor)
3015 # at this stage, all new LVs have been created, we can rename the
3016 # old ones
3017 feedback_fn("Renaming original volumes...")
3018 rename_list = [(o, n.children[0].logical_id)
3019 for (o, n) in zip(instance.disks, new_disks)]
3020 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3021 result.Raise("Failed to rename original LVs")
3023 feedback_fn("Initializing DRBD devices...")
3024 # all child devices are in place, we can now create the DRBD devices
3025 try:
3026 for disk in anno_disks:
3027 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3028 f_create = node == pnode
3029 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3030 excl_stor)
3031 except errors.GenericError, e:
3032 feedback_fn("Initializing of DRBD devices failed;"
3033 " renaming back original volumes...")
3034 for disk in new_disks:
3035 self.cfg.SetDiskID(disk, pnode)
3036 rename_back_list = [(n.children[0], o.logical_id)
3037 for (n, o) in zip(new_disks, instance.disks)]
3038 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3039 result.Raise("Failed to rename LVs back after error %s" % str(e))
3042 # at this point, the instance has been modified
3043 instance.disk_template = constants.DT_DRBD8
3044 instance.disks = new_disks
3045 self.cfg.Update(instance, feedback_fn)
3047 # Release node locks while waiting for sync
3048 ReleaseLocks(self, locking.LEVEL_NODE)
3050 # disks are created, waiting for sync
3051 disk_abort = not WaitForSync(self, instance,
3052 oneshot=not self.op.wait_for_sync)
3053 if disk_abort:
3054 raise errors.OpExecError("There are some degraded disks for"
3055 " this instance, please cleanup manually")
3057 # Node resource locks will be released by caller
3059 def _ConvertDrbdToPlain(self, feedback_fn):
3060 """Converts an instance from drbd to plain.
3063 instance = self.instance
3065 assert len(instance.secondary_nodes) == 1
3066 assert instance.disk_template == constants.DT_DRBD8
3068 pnode = instance.primary_node
3069 snode = instance.secondary_nodes[0]
3070 feedback_fn("Converting template to plain")
3072 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3073 new_disks = [d.children[0] for d in instance.disks]
3075 # copy over size, mode and name
3076 for parent, child in zip(old_disks, new_disks):
3077 child.size = parent.size
3078 child.mode = parent.mode
3079 child.name = parent.name
3081 # this is a DRBD disk, return its port to the pool
3082 # NOTE: this must be done right before the call to cfg.Update!
3083 for disk in old_disks:
3084 tcp_port = disk.logical_id[2]
3085 self.cfg.AddTcpUdpPort(tcp_port)
3087 # update instance structure
3088 instance.disks = new_disks
3089 instance.disk_template = constants.DT_PLAIN
3090 _UpdateIvNames(0, instance.disks)
3091 self.cfg.Update(instance, feedback_fn)
3093 # Release locks in case removing disks takes a while
3094 ReleaseLocks(self, locking.LEVEL_NODE)
3096 feedback_fn("Removing volumes on the secondary node...")
3097 for disk in old_disks:
3098 self.cfg.SetDiskID(disk, snode)
3099 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3100 if msg:
3101 self.LogWarning("Could not remove block device %s on node %s,"
3102 " continuing anyway: %s", disk.iv_name, snode, msg)
3104 feedback_fn("Removing unneeded volumes on the primary node...")
3105 for idx, disk in enumerate(old_disks):
3106 meta = disk.children[1]
3107 self.cfg.SetDiskID(meta, pnode)
3108 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3109 if msg:
3110 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3111 " continuing anyway: %s", idx, pnode, msg)
3113 def _CreateNewDisk(self, idx, params, _):
3114 """Creates a new disk.
3117 instance = self.instance
3119 # add a new disk
3120 if instance.disk_template in constants.DTS_FILEBASED:
3121 (file_driver, file_path) = instance.disks[0].logical_id
3122 file_path = os.path.dirname(file_path)
3123 else:
3124 file_driver = file_path = None
3126 disk = \
3127 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3128 instance.primary_node, instance.secondary_nodes,
3129 [params], file_path, file_driver, idx,
3130 self.Log, self.diskparams)[0]
3132 new_disks = CreateDisks(self, instance, disks=[disk])
3134 if self.cluster.prealloc_wipe_disks:
3135 # Wipe new disk
3136 WipeOrCleanupDisks(self, instance,
3137 disks=[(idx, disk, 0)],
3138 cleanup=new_disks)
3140 return (disk, [
3141 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3145 def _ModifyDisk(idx, disk, params, _):
3146 """Modifies a disk.
3148 """
3149 changes = []
3150 mode = params.get(constants.IDISK_MODE, None)
3151 if mode:
3152 disk.mode = mode
3153 changes.append(("disk.mode/%d" % idx, disk.mode))
3155 name = params.get(constants.IDISK_NAME, None)
3156 disk.name = name
3157 changes.append(("disk.name/%d" % idx, disk.name))
3161 def _RemoveDisk(self, idx, root, _):
3162 """Removes a disk.
3164 """
3165 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3166 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3167 self.cfg.SetDiskID(disk, node)
3168 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3169 if msg:
3170 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3171 " continuing anyway", idx, node, msg)
3173 # if this is a DRBD disk, return its port to the pool
3174 if root.dev_type in constants.LDS_DRBD:
3175 self.cfg.AddTcpUdpPort(root.logical_id[2])
3177 def _CreateNewNic(self, idx, params, private):
3178 """Creates data structure for a new network interface.
3181 mac = params[constants.INIC_MAC]
3182 ip = params.get(constants.INIC_IP, None)
3183 net = params.get(constants.INIC_NETWORK, None)
3184 name = params.get(constants.INIC_NAME, None)
3185 net_uuid = self.cfg.LookupNetwork(net)
3186 #TODO: not private.filled?? can a nic have no nicparams??
3187 nicparams = private.filled
3188 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3189 nicparams=nicparams)
3190 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3192 return (nobj, [
3193 ("nic.%d" % idx,
3194 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3195 (mac, ip, private.filled[constants.NIC_MODE],
3196 private.filled[constants.NIC_LINK],
3197 net)),
3198 ])
3200 def _ApplyNicMods(self, idx, nic, params, private):
3201 """Modifies a network interface.
3206 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3207 if key in params:
3208 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3209 setattr(nic, key, params[key])
3211 new_net = params.get(constants.INIC_NETWORK, nic.network)
3212 new_net_uuid = self.cfg.LookupNetwork(new_net)
3213 if new_net_uuid != nic.network:
3214 changes.append(("nic.network/%d" % idx, new_net))
3215 nic.network = new_net_uuid
3217 if private.filled:
3218 nic.nicparams = private.filled
3220 for (key, val) in nic.nicparams.items():
3221 changes.append(("nic.%s/%d" % (key, idx), val))
3225 def Exec(self, feedback_fn):
3226 """Modifies an instance.
3228 All parameters take effect only at the next restart of the instance.
3230 """
3231 # Process here the warnings from CheckPrereq, as we don't have a
3232 # feedback_fn there.
3233 # TODO: Replace with self.LogWarning
3234 for warn in self.warn:
3235 feedback_fn("WARNING: %s" % warn)
3237 assert ((self.op.disk_template is None) ^
3238 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3239 "Not owning any node resource locks"
3242 instance = self.instance
3244 # New primary node
3245 if self.op.pnode:
3246 instance.primary_node = self.op.pnode
3249 if self.op.runtime_mem:
3250 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3251 instance,
3252 self.op.runtime_mem)
3253 rpcres.Raise("Cannot modify instance runtime memory")
3254 result.append(("runtime_memory", self.op.runtime_mem))
3256 # Apply disk changes
3257 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3258 self._CreateNewDisk, self._ModifyDisk,
3259 self._RemoveDisk)
3260 _UpdateIvNames(0, instance.disks)
3262 if self.op.disk_template:
3263 if __debug__:
3264 check_nodes = set(instance.all_nodes)
3265 if self.op.remote_node:
3266 check_nodes.add(self.op.remote_node)
3267 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3268 owned = self.owned_locks(level)
3269 assert not (check_nodes - owned), \
3270 ("Not owning the correct locks, owning %r, expected at least %r" %
3271 (owned, check_nodes))
3273 r_shut = ShutdownInstanceDisks(self, instance)
3274 if not r_shut:
3275 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3276 " proceed with disk template conversion")
3277 mode = (instance.disk_template, self.op.disk_template)
3278 try:
3279 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3280 except:
3281 self.cfg.ReleaseDRBDMinors(instance.name)
3282 raise
3283 result.append(("disk_template", self.op.disk_template))
3285 assert instance.disk_template == self.op.disk_template, \
3286 ("Expected disk template '%s', found '%s'" %
3287 (self.op.disk_template, instance.disk_template))
3289 # Release node and resource locks if there are any (they might already have
3290 # been released during disk conversion)
3291 ReleaseLocks(self, locking.LEVEL_NODE)
3292 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3295 if self._new_nics is not None:
3296 instance.nics = self._new_nics
3297 result.extend(self._nic_chgdesc)
3300 if self.op.hvparams:
3301 instance.hvparams = self.hv_inst
3302 for key, val in self.op.hvparams.iteritems():
3303 result.append(("hv/%s" % key, val))
3306 if self.op.beparams:
3307 instance.beparams = self.be_inst
3308 for key, val in self.op.beparams.iteritems():
3309 result.append(("be/%s" % key, val))
3313 instance.os = self.op.os_name
3316 if self.op.osparams:
3317 instance.osparams = self.os_inst
3318 for key, val in self.op.osparams.iteritems():
3319 result.append(("os/%s" % key, val))
3321 if self.op.offline is None:
3322 # Ignore
3323 pass
3324 elif self.op.offline:
3325 # Mark instance as offline
3326 self.cfg.MarkInstanceOffline(instance.name)
3327 result.append(("admin_state", constants.ADMINST_OFFLINE))
3329 # Mark instance as online, but stopped
3330 self.cfg.MarkInstanceDown(instance.name)
3331 result.append(("admin_state", constants.ADMINST_DOWN))
3333 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3335 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3336 self.owned_locks(locking.LEVEL_NODE)), \
3337 "All node locks should have been released by now"
3341 _DISK_CONVERSIONS = {
3342 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3343 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3344 }
3347 class LUInstanceChangeGroup(LogicalUnit):
3348 HPATH = "instance-change-group"
3349 HTYPE = constants.HTYPE_INSTANCE
3350 REQ_BGL = False
3352 def ExpandNames(self):
3353 self.share_locks = ShareAll()
3355 self.needed_locks = {
3356 locking.LEVEL_NODEGROUP: [],
3357 locking.LEVEL_NODE: [],
3358 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3359 }
3361 self._ExpandAndLockInstance()
3363 if self.op.target_groups:
3364 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3365 self.op.target_groups)
3366 else:
3367 self.req_target_uuids = None
3369 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3371 def DeclareLocks(self, level):
3372 if level == locking.LEVEL_NODEGROUP:
3373 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3375 if self.req_target_uuids:
3376 lock_groups = set(self.req_target_uuids)
3378 # Lock all groups used by instance optimistically; this requires going
3379 # via the node before it's locked, requiring verification later on
3380 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3381 lock_groups.update(instance_groups)
3382 else:
3383 # No target groups, need to lock all of them
3384 lock_groups = locking.ALL_SET
3386 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3388 elif level == locking.LEVEL_NODE:
3389 if self.req_target_uuids:
3390 # Lock all nodes used by instances
3391 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3392 self._LockInstancesNodes()
3394 # Lock all nodes in all potential target groups
3395 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3396 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3397 member_nodes = [node_name
3398 for group in lock_groups
3399 for node_name in self.cfg.GetNodeGroup(group).members]
3400 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3401 else:
3402 # Lock all nodes as all groups are potential targets
3403 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3405 def CheckPrereq(self):
3406 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3407 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3408 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3410 assert (self.req_target_uuids is None or
3411 owned_groups.issuperset(self.req_target_uuids))
3412 assert owned_instances == set([self.op.instance_name])
3414 # Get instance information
3415 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3417 # Check if node groups for locked instance are still correct
3418 assert owned_nodes.issuperset(self.instance.all_nodes), \
3419 ("Instance %s's nodes changed while we kept the lock" %
3420 self.op.instance_name)
3422 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3423 owned_groups)
3425 if self.req_target_uuids:
3426 # User requested specific target groups
3427 self.target_uuids = frozenset(self.req_target_uuids)
3428 else:
3429 # All groups except those used by the instance are potential targets
3430 self.target_uuids = owned_groups - inst_groups
3432 conflicting_groups = self.target_uuids & inst_groups
3433 if conflicting_groups:
3434 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3435 " used by the instance '%s'" %
3436 (utils.CommaJoin(conflicting_groups),
3437 self.op.instance_name),
3438 errors.ECODE_INVAL)
3440 if not self.target_uuids:
3441 raise errors.OpPrereqError("There are no possible target groups",
3444 def BuildHooksEnv(self):
3445 """Build hooks env.
3447 """
3448 assert self.target_uuids
3450 env = {
3451 "TARGET_GROUPS": " ".join(self.target_uuids),
3454 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3456 return env
3458 def BuildHooksNodes(self):
3459 """Build hooks nodes.
3462 mn = self.cfg.GetMasterNode()
3463 return ([mn], [mn])
3465 def Exec(self, feedback_fn):
3466 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3468 assert instances == [self.op.instance_name], "Instance not locked"
3470 req = iallocator.IAReqGroupChange(instances=instances,
3471 target_groups=list(self.target_uuids))
3472 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3474 ial.Run(self.op.iallocator)
3476 if not ial.success:
3477 raise errors.OpPrereqError("Can't compute solution for changing group of"
3478 " instance '%s' using iallocator '%s': %s" %
3479 (self.op.instance_name, self.op.iallocator,
3480 ial.info), errors.ECODE_NORES)
3482 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3484 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3485 " instance '%s'", len(jobs), self.op.instance_name)
3487 return ResultWithJobs(jobs)