# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Logical units dealing with instances."""
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
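
# Illustrative usage (hypothetical names): if "inst1" resolves to
# "inst1.example.com" the check passes, since the given name is a prefix of
# the resolved FQDN; if it resolved to "other.example.com" instead, an
# OpPrereqError with ECODE_INVAL would be raised.
#
#   hostname = _CheckHostnameSane(lu, "inst1")
#   # hostname.name == "inst1.example.com"; hostname.ip is the resolved IP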


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
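
# A small sketch of the filling logic (hypothetical values): opcode values
# set to VALUE_AUTO are replaced with the cluster defaults before the dict
# is type-checked and filled.
#
#   op.beparams = {constants.BE_VCPUS: constants.VALUE_AUTO}
#   be_full = _ComputeFullBeParams(op, cluster)
#   # be_full[constants.BE_VCPUS] now holds the cluster-wide default, and
#   # all remaining be parameters are filled from cluster.beparams["default"]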


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The list of built NIC objects

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
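
# A hedged input/output sketch (hypothetical values): each entry of op.nics
# is a dict keyed by INIC_* constants; the result is a list of objects.NIC
# with MAC, IP, name and nicparams resolved or deferred.
#
#   op.nics = [{constants.INIC_IP: constants.NIC_IP_POOL,
#               constants.INIC_NETWORK: "net1"}]
#   nics = _ComputeNics(op, cluster, default_ip, cfg, ec_id)
#   # nics[0].ip is still "pool" here; the concrete address is drawn from
#   # the network's pool later, in CheckPrereq, once the node is known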


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
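
# A hedged example of the expected instance_spec shape (numbers are
# illustrative); absent keys fall back to None/0/[] as above.
#
#   ispec = {
#     constants.ISPEC_MEM_SIZE: 512,
#     constants.ISPEC_CPU_COUNT: 2,
#     constants.ISPEC_DISK_COUNT: 1,
#     constants.ISPEC_DISK_SIZE: [10240],
#     constants.ISPEC_NIC_COUNT: 1,
#     }
#   res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, "plain")
#   # an empty result means the spec satisfies the policy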


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
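
# A brief sketch of the convention checked above: variants are encoded in
# the OS name as "<os>+<variant>" (e.g. a hypothetical
# "debootstrap+default"). For an OS without supported_variants, passing
# "debootstrap+foo" raises; for one with variants, a bare "debootstrap"
# raises "OS name must include a variant".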


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template not in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)),
                                 errors.ECODE_STATE)

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]
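
  # A hedged sketch of the result handling above (hypothetical names): for a
  # DRBD instance the allocator returns two nodes, e.g.
  # ial.result == ["node1.example.com", "node2.example.com"], which become
  # self.op.pnode and self.op.snode; non-mirrored templates yield a
  # single-element list.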

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)

    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    the export declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
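
  # A hedged sketch of the INI-style export file read above (section names
  # per constants.INISECT_EXP/INISECT_INS; all values hypothetical):
  #
  #   [export]
  #   version = 0
  #   os = debootstrap
  #
  #   [instance]
  #   name = inst1.example.com
  #   disk_template = plain
  #   disk0_size = 10240
  #   nic0_mac = aa:00:00:fa:3a:3f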

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
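
  # A hedged example of the path composition (all components illustrative,
  # assuming a common default cluster storage directory):
  #
  #   cfg_storagedir           -> /srv/ganeti/file-storage
  #   self.op.file_storage_dir -> "mysubdir" (optional)
  #   self.op.instance_name    -> "inst1.example.com"
  #   result -> /srv/ganeti/file-storage/mysubdir/inst1.example.com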

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
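
# A hedged end-to-end sketch of driving this LU from the command line
# (values illustrative; exact flags depend on the installed Ganeti version):
#
#   gnt-instance add -t drbd -n node1:node2 -o debootstrap+default \
#     -s 10G -B maxmem=512M,vcpus=2 inst1.example.com
#
# This builds an OpInstanceCreate opcode in INSTANCE_CREATE mode, which the
# master daemon dispatches to LUInstanceCreate above.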


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name
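
# A hedged usage sketch (names illustrative): renaming requires the instance
# to be stopped, as enforced by CheckInstanceState in CheckPrereq above:
#
#   gnt-instance rename inst1.example.com inst2.example.com
#
# On success the LU returns the new name, after running the OS rename script
# on the primary node.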


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)
1709 def Exec(self, feedback_fn):
1710 """Move an instance.
1712 The move is done by shutting it down on its present node, copying
1713 the data over (slow) and starting it on the new node.
1716 instance = self.instance
1718 source_node = instance.primary_node
1719 target_node = self.target_node
1721 self.LogInfo("Shutting down instance %s on source node %s",
1722 instance.name, source_node)
1724 assert (self.owned_locks(locking.LEVEL_NODE) ==
1725 self.owned_locks(locking.LEVEL_NODE_RES))
1727 result = self.rpc.call_instance_shutdown(source_node, instance,
1728 self.op.shutdown_timeout,
1730 msg = result.fail_msg
1732 if self.op.ignore_consistency:
1733 self.LogWarning("Could not shutdown instance %s on node %s."
1734 " Proceeding anyway. Please make sure node"
1735 " %s is down. Error details: %s",
1736 instance.name, source_node, source_node, msg)
1738 raise errors.OpExecError("Could not shutdown instance %s on"
1740 (instance.name, source_node, msg))
1742 # create the target disks
1744 CreateDisks(self, instance, target_node=target_node)
1745 except errors.OpExecError:
1746 self.LogWarning("Device creation failed")
1747 self.cfg.ReleaseDRBDMinors(instance.name)
1750 cluster_name = self.cfg.GetClusterInfo().cluster_name
1753 # activate, get path, copy the data over
1754 for idx, disk in enumerate(instance.disks):
1755 self.LogInfo("Copying data for disk %d", idx)
1756 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1757 instance.name, True, idx)
1759 self.LogWarning("Can't assemble newly created disk %d: %s",
1760 idx, result.fail_msg)
1761 errs.append(result.fail_msg)
1763 dev_path = result.payload
1764 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1765 target_node, dev_path,
1766 cluster_name)
1767 if result.fail_msg:
1768 self.LogWarning("Can't copy data over for disk %d: %s",
1769 idx, result.fail_msg)
1770 errs.append(result.fail_msg)
1771 break
1773 if errs:
1774 self.LogWarning("Some disks failed to copy, aborting")
1775 try:
1776 RemoveDisks(self, instance, target_node=target_node)
1777 finally:
1778 self.cfg.ReleaseDRBDMinors(instance.name)
1779 raise errors.OpExecError("Errors during disk copy: %s" %
1780 (",".join(errs),))
1782 instance.primary_node = target_node
1783 self.cfg.Update(instance, feedback_fn)
1785 self.LogInfo("Removing the disks on the original node")
1786 RemoveDisks(self, instance, target_node=source_node)
1788 # Only start the instance if it's marked as up
1789 if instance.admin_state == constants.ADMINST_UP:
1790 self.LogInfo("Starting instance %s on node %s",
1791 instance.name, target_node)
1793 disks_ok, _ = AssembleInstanceDisks(self, instance,
1794 ignore_secondaries=True)
1795 if not disks_ok:
1796 ShutdownInstanceDisks(self, instance)
1797 raise errors.OpExecError("Can't activate the instance's disks")
1799 result = self.rpc.call_instance_start(target_node,
1800 (instance, None, None), False,
1801 self.op.reason)
1802 msg = result.fail_msg
1803 if msg:
1804 ShutdownInstanceDisks(self, instance)
1805 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1806 (instance.name, target_node, msg))
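# Illustrative client-side sketch (not part of this module; names are
# placeholders): a move is normally requested via "gnt-instance move" or an
# opcode along the lines of:
#
#   op = opcodes.OpInstanceMove(instance_name="inst1.example.com",
#                               target_node="node2.example.com")
#
# The LU above then shuts the instance down, copies every disk to the target
# node with call_blockdev_export and restarts the instance if it was up.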
1809 class LUInstanceMultiAlloc(NoHooksLU):
1810 """Allocates multiple instances at the same time.
1815 def CheckArguments(self):
1816 """Check arguments.
1818 """
1819 nodes = []
1820 for inst in self.op.instances:
1821 if inst.iallocator is not None:
1822 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1823 " instance objects", errors.ECODE_INVAL)
1824 nodes.append(bool(inst.pnode))
1825 if inst.disk_template in constants.DTS_INT_MIRROR:
1826 nodes.append(bool(inst.snode))
1828 has_nodes = compat.any(nodes)
1829 if compat.all(nodes) ^ has_nodes:
1830 raise errors.OpPrereqError("There are instance objects providing"
1831 " pnode/snode while others do not",
1834 if not has_nodes and self.op.iallocator is None:
1835 default_iallocator = self.cfg.GetDefaultIAllocator()
1836 if default_iallocator:
1837 self.op.iallocator = default_iallocator
1838 else:
1839 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1840 " given and no cluster-wide default"
1841 " iallocator found; please specify either"
1842 " an iallocator or nodes on the instances"
1843 " or set a cluster-wide default iallocator",
1846 _CheckOpportunisticLocking(self.op)
1848 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1849 if dups:
1850 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1851 utils.CommaJoin(dups), errors.ECODE_INVAL)
1853 def ExpandNames(self):
1854 """Calculate the locks.
1857 self.share_locks = ShareAll()
1858 self.needed_locks = {
1859 # iallocator will select nodes and even if no iallocator is used,
1860 # collisions with LUInstanceCreate should be avoided
1861 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1862 }
1864 if self.op.iallocator:
1865 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1866 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1868 if self.op.opportunistic_locking:
1869 self.opportunistic_locks[locking.LEVEL_NODE] = True
1870 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1871 else:
1872 nodeslist = []
1873 for inst in self.op.instances:
1874 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1875 nodeslist.append(inst.pnode)
1876 if inst.snode is not None:
1877 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1878 nodeslist.append(inst.snode)
1880 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1881 # Lock resources of instance's primary and secondary nodes (copy to
1882 # prevent accidential modification)
1883 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1885 def CheckPrereq(self):
1886 """Check prerequisite.
1889 if self.op.iallocator:
1890 cluster = self.cfg.GetClusterInfo()
1891 default_vg = self.cfg.GetVGName()
1892 ec_id = self.proc.GetECId()
1894 if self.op.opportunistic_locking:
1895 # Only consider nodes for which a lock is held
1896 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1897 else:
1898 node_whitelist = None
1900 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1901 _ComputeNics(op, cluster, None,
1902 self.cfg, ec_id),
1903 _ComputeFullBeParams(op, cluster),
1904 node_whitelist)
1905 for op in self.op.instances]
1907 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1908 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1910 ial.Run(self.op.iallocator)
1912 if not ial.success:
1913 raise errors.OpPrereqError("Can't compute nodes using"
1914 " iallocator '%s': %s" %
1915 (self.op.iallocator, ial.info),
1916 errors.ECODE_NORES)
1918 self.ia_result = ial.result
1920 if self.op.dry_run:
1921 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1922 constants.JOB_IDS_KEY: [],
1923 })
1925 def _ConstructPartialResult(self):
1926 """Contructs the partial result.
1929 if self.op.iallocator:
1930 (allocatable, failed_insts) = self.ia_result
1931 allocatable_insts = map(compat.fst, allocatable)
1932 else:
1933 allocatable_insts = [op.instance_name for op in self.op.instances]
1934 failed_insts = []
1936 return {
1937 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1938 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1939 }
1941 def Exec(self, feedback_fn):
1942 """Executes the opcode.
1946 if self.op.iallocator:
1947 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1948 (allocatable, failed) = self.ia_result
1950 for (name, nodes) in allocatable:
1951 op = op2inst.pop(name)
1953 if len(nodes) > 1:
1954 (op.pnode, op.snode) = nodes
1955 else:
1956 op.pnode = nodes[0]
1958 jobs.append([op])
1960 missing = set(op2inst.keys()) - set(failed)
1961 assert not missing, \
1962 "Iallocator did return incomplete result: %s" % \
1963 utils.CommaJoin(missing)
1964 else:
1965 jobs.extend([op] for op in self.op.instances)
1967 return ResultWithJobs(jobs, **self._ConstructPartialResult())
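# Shape of the combined result (illustrative sketch; instance names are
# placeholders):
#
#   {
#     constants.JOB_IDS_KEY: [...],
#     opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", "inst2"],
#     opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst3"],
#   }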
1970 class _InstNicModPrivate:
1971 """Data structure for network interface modifications.
1973 Used by L{LUInstanceSetParams}.
1975 """
1976 def __init__(self):
1977 self.params = None
1978 self.filled = None
1981 def _PrepareContainerMods(mods, private_fn):
1982 """Prepares a list of container modifications by adding a private data field.
1984 @type mods: list of tuples; (operation, index, parameters)
1985 @param mods: List of modifications
1986 @type private_fn: callable or None
1987 @param private_fn: Callable for constructing a private data field for a
1988 modification
1989 @rtype: list
1991 """
1992 if private_fn is None:
1993 fn = lambda: None
1994 else:
1995 fn = private_fn
1997 return [(op, idx, params, fn()) for (op, idx, params) in mods]
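# Minimal usage sketch (assuming a single NIC addition; the parameter values
# are placeholders):
#
#   mods = [(constants.DDM_ADD, -1,
#            {constants.INIC_MAC: constants.VALUE_AUTO})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate instance>)]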
2000 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2001 """Checks if nodes have enough physical CPUs
2003 This function checks if all given nodes have the needed number of
2004 physical CPUs. In case any node has less CPUs or we cannot get the
2005 information from the node, this function raises an OpPrereqError
2006 exception.
2008 @type lu: C{LogicalUnit}
2009 @param lu: a logical unit from which we get configuration data
2010 @type nodenames: C{list}
2011 @param nodenames: the list of node names to check
2012 @type requested: C{int}
2013 @param requested: the minimum acceptable number of physical CPUs
2014 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2015 or we cannot check the node
2017 """
2018 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2019 for node in nodenames:
2020 info = nodeinfo[node]
2021 info.Raise("Cannot get current information from node %s" % node,
2022 prereq=True, ecode=errors.ECODE_ENVIRON)
2023 (_, _, (hv_info, )) = info.payload
2024 num_cpus = hv_info.get("cpu_total", None)
2025 if not isinstance(num_cpus, int):
2026 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2027 " on node %s, result was '%s'" %
2028 (node, num_cpus), errors.ECODE_ENVIRON)
2029 if requested > num_cpus:
2030 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2031 "required" % (node, num_cpus, requested),
2035 def GetItemFromContainer(identifier, kind, container):
2036 """Return the item refered by the identifier.
2038 @type identifier: string
2039 @param identifier: Item index or name or UUID
2040 @type kind: string
2041 @param kind: One-word item description
2042 @type container: list
2043 @param container: Container to get the item from
2045 """
2046 try:
2047 # Index
2048 idx = int(identifier)
2049 if idx == -1:
2050 # Append
2051 absidx = len(container) - 1
2052 elif idx < 0:
2053 raise IndexError("Not accepting negative indices other than -1")
2054 elif idx > len(container):
2055 raise IndexError("Got %s index %s, but there are only %s" %
2056 (kind, idx, len(container)))
2057 else:
2058 absidx = idx
2059 return (absidx, container[idx])
2060 except ValueError:
2061 pass
2063 for idx, item in enumerate(container):
2064 if item.uuid == identifier or item.name == identifier:
2065 return (idx, item)
2067 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2068 (kind, identifier), errors.ECODE_NOENT)
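# Illustrative behaviour, assuming "disks" is a two-element list of
# L{objects.Disk} objects:
#
#   GetItemFromContainer("0", "disk", disks)   -> (0, disks[0])
#   GetItemFromContainer("-1", "disk", disks)  -> (1, disks[1])
#   GetItemFromContainer(uuid_or_name, "disk", disks)
#                                              -> (index, matching item)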
2071 def _ApplyContainerMods(kind, container, chgdesc, mods,
2072 create_fn, modify_fn, remove_fn):
2073 """Applies descriptions in C{mods} to C{container}.
2076 @param kind: One-word item description
2077 @type container: list
2078 @param container: Container to modify
2079 @type chgdesc: None or list
2080 @param chgdesc: List of applied changes
2081 @type mods: list
2082 @param mods: Modifications as returned by L{_PrepareContainerMods}
2083 @type create_fn: callable
2084 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2085 receives absolute item index, parameters and private data object as added
2086 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2088 @type modify_fn: callable
2089 @param modify_fn: Callback for modifying an existing item
2090 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2091 and private data object as added by L{_PrepareContainerMods}, returns
2092 changes as list
2093 @type remove_fn: callable
2094 @param remove_fn: Callback on removing item; receives absolute item index,
2095 item and private data object as added by L{_PrepareContainerMods}
2097 """
2098 for (op, identifier, params, private) in mods:
2099 changes = None
2101 if op == constants.DDM_ADD:
2102 # Calculate where item will be added
2103 # When adding an item, identifier can only be an index
2104 try:
2105 idx = int(identifier)
2106 except ValueError:
2107 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2108 " identifier for %s" % constants.DDM_ADD,
2111 addidx = len(container)
2112 else:
2113 if idx < 0:
2114 raise IndexError("Not accepting negative indices other than -1")
2115 elif idx > len(container):
2116 raise IndexError("Got %s index %s, but there are only %s" %
2117 (kind, idx, len(container)))
2118 addidx = idx
2120 if create_fn is None:
2121 item = params
2122 else:
2123 (item, changes) = create_fn(addidx, params, private)
2125 if idx == -1:
2126 container.append(item)
2127 else:
2129 assert idx <= len(container)
2130 # list.insert does so before the specified index
2131 container.insert(idx, item)
2132 else:
2133 # Retrieve existing item
2134 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2136 if op == constants.DDM_REMOVE:
2137 assert not params
2139 if remove_fn is not None:
2140 remove_fn(absidx, item, private)
2142 changes = [("%s/%s" % (kind, absidx), "remove")]
2144 assert container[absidx] == item
2145 del container[absidx]
2146 elif op == constants.DDM_MODIFY:
2147 if modify_fn is not None:
2148 changes = modify_fn(absidx, item, params, private)
2149 else:
2150 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2152 assert _TApplyContModsCbChanges(changes)
2154 if not (chgdesc is None or changes is None):
2155 chgdesc.extend(changes)
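# Minimal sketch of the callback protocol (hypothetical "new_item"; a real
# create_fn builds an objects.Disk or objects.NIC):
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, {})], None)
#   _ApplyContainerMods("disk", container, chgdesc, mods,
#                       lambda absidx, params, private: (new_item, []),
#                       None, None)
#   # "container" now ends with new_item; "chgdesc" lists applied changes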
2158 def _UpdateIvNames(base_index, disks):
2159 """Updates the C{iv_name} attribute of disks.
2161 @type disks: list of L{objects.Disk}
2162 @param disks: Disks to be updated
2163 """
2164 for (idx, disk) in enumerate(disks):
2165 disk.iv_name = "disk/%s" % (base_index + idx, )
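# Example: after deleting the middle one of three disks, calling
# _UpdateIvNames(0, instance.disks) renumbers the survivors back to
# "disk/0" and "disk/1".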
2168 class LUInstanceSetParams(LogicalUnit):
2169 """Modifies an instances's parameters.
2172 HPATH = "instance-modify"
2173 HTYPE = constants.HTYPE_INSTANCE
2174 REQ_BGL = False
2176 @staticmethod
2177 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2178 assert ht.TList(mods)
2179 assert not mods or len(mods[0]) in (2, 3)
2181 if mods and len(mods[0]) == 2:
2182 result = []
2184 addremove = 0
2185 for op, params in mods:
2186 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2187 result.append((op, -1, params))
2188 addremove += 1
2190 if addremove > 1:
2191 raise errors.OpPrereqError("Only one %s add or remove operation is"
2192 " supported at a time" % kind,
2195 result.append((constants.DDM_MODIFY, op, params))
2197 assert verify_fn(result)
2198 else:
2199 result = mods
2201 return result
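# Illustrative upgrade of old-style two-element modifications (parameter
# dicts are placeholders):
#
#   [(constants.DDM_ADD, {...})]  becomes  [(constants.DDM_ADD, -1, {...})]
#   [("2", {...})]                becomes  [(constants.DDM_MODIFY, "2", {...})]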
2203 @staticmethod
2204 def _CheckMods(kind, mods, key_types, item_fn):
2205 """Ensures requested disk/NIC modifications are valid.
2208 for (op, _, params) in mods:
2209 assert ht.TDict(params)
2211 # If 'key_types' is an empty dict, we assume we have an
2212 # 'ext' template and thus do not ForceDictType
2213 if key_types:
2214 utils.ForceDictType(params, key_types)
2216 if op == constants.DDM_REMOVE:
2217 if params:
2218 raise errors.OpPrereqError("No settings should be passed when"
2219 " removing a %s" % kind,
2221 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2222 item_fn(op, params)
2223 else:
2224 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2226 @staticmethod
2227 def _VerifyDiskModification(op, params):
2228 """Verifies a disk modification.
2231 if op == constants.DDM_ADD:
2232 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2233 if mode not in constants.DISK_ACCESS_SET:
2234 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2237 size = params.get(constants.IDISK_SIZE, None)
2238 if size is None:
2239 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2240 constants.IDISK_SIZE, errors.ECODE_INVAL)
2242 try:
2243 size = int(size)
2244 except (TypeError, ValueError), err:
2245 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2248 params[constants.IDISK_SIZE] = size
2249 name = params.get(constants.IDISK_NAME, None)
2250 if name is not None and name.lower() == constants.VALUE_NONE:
2251 params[constants.IDISK_NAME] = None
2253 elif op == constants.DDM_MODIFY:
2254 if constants.IDISK_SIZE in params:
2255 raise errors.OpPrereqError("Disk size change not possible, use"
2256 " grow-disk", errors.ECODE_INVAL)
2258 raise errors.OpPrereqError("Disk modification doesn't support"
2259 " additional arbitrary parameters",
2261 name = params.get(constants.IDISK_NAME, None)
2262 if name is not None and name.lower() == constants.VALUE_NONE:
2263 params[constants.IDISK_NAME] = None
2265 @staticmethod
2266 def _VerifyNicModification(op, params):
2267 """Verifies a network interface modification.
2270 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2271 ip = params.get(constants.INIC_IP, None)
2272 name = params.get(constants.INIC_NAME, None)
2273 req_net = params.get(constants.INIC_NETWORK, None)
2274 link = params.get(constants.NIC_LINK, None)
2275 mode = params.get(constants.NIC_MODE, None)
2276 if name is not None and name.lower() == constants.VALUE_NONE:
2277 params[constants.INIC_NAME] = None
2278 if req_net is not None:
2279 if req_net.lower() == constants.VALUE_NONE:
2280 params[constants.INIC_NETWORK] = None
2281 req_net = None
2282 elif link is not None or mode is not None:
2283 raise errors.OpPrereqError("If network is given"
2284 " mode or link should not",
2287 if op == constants.DDM_ADD:
2288 macaddr = params.get(constants.INIC_MAC, None)
2289 if macaddr is None:
2290 params[constants.INIC_MAC] = constants.VALUE_AUTO
2292 if ip is not None:
2293 if ip.lower() == constants.VALUE_NONE:
2294 params[constants.INIC_IP] = None
2295 else:
2296 if ip.lower() == constants.NIC_IP_POOL:
2297 if op == constants.DDM_ADD and req_net is None:
2298 raise errors.OpPrereqError("If ip=pool, parameter network"
2302 if not netutils.IPAddress.IsValid(ip):
2303 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2306 if constants.INIC_MAC in params:
2307 macaddr = params[constants.INIC_MAC]
2308 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2309 macaddr = utils.NormalizeAndValidateMac(macaddr)
2311 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2312 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2313 " modifying an existing NIC",
2316 def CheckArguments(self):
2317 if not (self.op.nics or self.op.disks or self.op.disk_template or
2318 self.op.hvparams or self.op.beparams or self.op.os_name or
2319 self.op.offline is not None or self.op.runtime_mem or
2320 self.op.osparams):
2321 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2323 if self.op.hvparams:
2324 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2325 "hypervisor", "instance", "cluster")
2327 self.op.disks = self._UpgradeDiskNicMods(
2328 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2329 self.op.nics = self._UpgradeDiskNicMods(
2330 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2332 if self.op.disks and self.op.disk_template is not None:
2333 raise errors.OpPrereqError("Disk template conversion and other disk"
2334 " changes not supported at the same time",
2337 if (self.op.disk_template and
2338 self.op.disk_template in constants.DTS_INT_MIRROR and
2339 self.op.remote_node is None):
2340 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2341 " one requires specifying a secondary node",
2344 # Check NIC modifications
2345 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2346 self._VerifyNicModification)
2348 if self.op.pnode:
2349 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2351 def ExpandNames(self):
2352 self._ExpandAndLockInstance()
2353 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2354 # Can't even acquire node locks in shared mode as upcoming changes in
2355 # Ganeti 2.6 will start to modify the node object on disk conversion
2356 self.needed_locks[locking.LEVEL_NODE] = []
2357 self.needed_locks[locking.LEVEL_NODE_RES] = []
2358 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2359 # Lock the node group to look up the ipolicy
2360 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2362 def DeclareLocks(self, level):
2363 if level == locking.LEVEL_NODEGROUP:
2364 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2365 # Acquire locks for the instance's nodegroups optimistically. Needs
2366 # to be verified in CheckPrereq
2367 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2368 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2369 elif level == locking.LEVEL_NODE:
2370 self._LockInstancesNodes()
2371 if self.op.disk_template and self.op.remote_node:
2372 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2373 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2374 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2375 # Copy node locks
2376 self.needed_locks[locking.LEVEL_NODE_RES] = \
2377 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2379 def BuildHooksEnv(self):
2380 """Build hooks env.
2382 This runs on the master, primary and secondaries.
2384 """
2385 args = {}
2386 if constants.BE_MINMEM in self.be_new:
2387 args["minmem"] = self.be_new[constants.BE_MINMEM]
2388 if constants.BE_MAXMEM in self.be_new:
2389 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2390 if constants.BE_VCPUS in self.be_new:
2391 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2392 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2393 # information at all.
2395 if self._new_nics is not None:
2396 nics = []
2398 for nic in self._new_nics:
2399 n = copy.deepcopy(nic)
2400 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2401 n.nicparams = nicparams
2402 nics.append(NICToTuple(self, n))
2404 args["nics"] = nics
2406 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2407 if self.op.disk_template:
2408 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2409 if self.op.runtime_mem:
2410 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2414 def BuildHooksNodes(self):
2415 """Build hooks nodes.
2418 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2419 return (nl, nl)
2421 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2422 old_params, cluster, pnode):
2424 update_params_dict = dict([(key, params[key])
2425 for key in constants.NICS_PARAMETERS
2426 if key in params])
2428 req_link = update_params_dict.get(constants.NIC_LINK, None)
2429 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2431 new_net_uuid = None
2432 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2433 if new_net_uuid_or_name:
2434 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2435 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2437 if old_net_uuid:
2438 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2440 if new_net_uuid:
2441 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2442 if not netparams:
2443 raise errors.OpPrereqError("No netparams found for the network"
2444 " %s, probably not connected" %
2445 new_net_obj.name, errors.ECODE_INVAL)
2446 new_params = dict(netparams)
2447 else:
2448 new_params = GetUpdatedParams(old_params, update_params_dict)
2450 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2452 new_filled_params = cluster.SimpleFillNIC(new_params)
2453 objects.NIC.CheckParameterSyntax(new_filled_params)
2455 new_mode = new_filled_params[constants.NIC_MODE]
2456 if new_mode == constants.NIC_MODE_BRIDGED:
2457 bridge = new_filled_params[constants.NIC_LINK]
2458 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2459 if msg:
2460 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2462 self.warn.append(msg)
2463 else:
2464 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2466 elif new_mode == constants.NIC_MODE_ROUTED:
2467 ip = params.get(constants.INIC_IP, old_ip)
2468 if ip is None:
2469 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2470 " on a routed NIC", errors.ECODE_INVAL)
2472 elif new_mode == constants.NIC_MODE_OVS:
2473 # TODO: check OVS link
2474 self.LogInfo("OVS links are currently not checked for correctness")
2476 if constants.INIC_MAC in params:
2477 mac = params[constants.INIC_MAC]
2478 if mac is None:
2479 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2481 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2482 # otherwise generate the MAC address
2483 params[constants.INIC_MAC] = \
2484 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2485 else:
2486 # or validate/reserve the current one
2487 try:
2488 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2489 except errors.ReservationError:
2490 raise errors.OpPrereqError("MAC address '%s' already in use"
2491 " in cluster" % mac,
2492 errors.ECODE_NOTUNIQUE)
2493 elif new_net_uuid != old_net_uuid:
2495 def get_net_prefix(net_uuid):
2496 mac_prefix = None
2497 if net_uuid:
2498 nobj = self.cfg.GetNetwork(net_uuid)
2499 mac_prefix = nobj.mac_prefix
2501 return mac_prefix
2503 new_prefix = get_net_prefix(new_net_uuid)
2504 old_prefix = get_net_prefix(old_net_uuid)
2505 if old_prefix != new_prefix:
2506 params[constants.INIC_MAC] = \
2507 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2509 # if there is a change in (ip, network) tuple
2510 new_ip = params.get(constants.INIC_IP, old_ip)
2511 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2512 if new_ip:
2513 # if IP is pool then require a network and generate one IP
2514 if new_ip.lower() == constants.NIC_IP_POOL:
2515 if new_net_uuid:
2516 try:
2517 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2518 except errors.ReservationError:
2519 raise errors.OpPrereqError("Unable to get a free IP"
2520 " from the address pool",
2522 self.LogInfo("Chose IP %s from network %s",
2525 params[constants.INIC_IP] = new_ip
2526 else:
2527 raise errors.OpPrereqError("ip=pool, but no network found",
2529 # Reserve the new IP in the new network, if any
2530 elif new_net_uuid:
2531 try:
2532 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2533 self.LogInfo("Reserving IP %s in network %s",
2534 new_ip, new_net_obj.name)
2535 except errors.ReservationError:
2536 raise errors.OpPrereqError("IP %s not available in network %s" %
2537 (new_ip, new_net_obj.name),
2538 errors.ECODE_NOTUNIQUE)
2539 # new network is None so check if new IP is a conflicting IP
2540 elif self.op.conflicts_check:
2541 _CheckForConflictingIp(self, new_ip, pnode)
2543 # release old IP if old network is not None
2544 if old_ip and old_net_uuid:
2546 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2547 except errors.AddressPoolError:
2548 logging.warning("Release IP %s not contained in network %s",
2549 old_ip, old_net_obj.name)
2551 # there are no changes in (ip, network) tuple and old network is not None
2552 elif (old_net_uuid is not None and
2553 (req_link is not None or req_mode is not None)):
2554 raise errors.OpPrereqError("Not allowed to change link or mode of"
2555 " a NIC that is connected to a network",
2558 private.params = new_params
2559 private.filled = new_filled_params
2561 def _PreCheckDiskTemplate(self, pnode_info):
2562 """CheckPrereq checks related to a new disk template."""
2563 # Arguments are passed to avoid configuration lookups
2564 instance = self.instance
2565 pnode = instance.primary_node
2566 cluster = self.cluster
2567 if instance.disk_template == self.op.disk_template:
2568 raise errors.OpPrereqError("Instance already has disk template %s" %
2569 instance.disk_template, errors.ECODE_INVAL)
2571 if (instance.disk_template,
2572 self.op.disk_template) not in self._DISK_CONVERSIONS:
2573 raise errors.OpPrereqError("Unsupported disk template conversion from"
2574 " %s to %s" % (instance.disk_template,
2575 self.op.disk_template),
2576 errors.ECODE_INVAL)
2577 CheckInstanceState(self, instance, INSTANCE_DOWN,
2578 msg="cannot change disk template")
2579 if self.op.disk_template in constants.DTS_INT_MIRROR:
2580 if self.op.remote_node == pnode:
2581 raise errors.OpPrereqError("Given new secondary node %s is the same"
2582 " as the primary node of the instance" %
2583 self.op.remote_node, errors.ECODE_STATE)
2584 CheckNodeOnline(self, self.op.remote_node)
2585 CheckNodeNotDrained(self, self.op.remote_node)
2586 # FIXME: here we assume that the old instance type is DT_PLAIN
2587 assert instance.disk_template == constants.DT_PLAIN
2588 disks = [{constants.IDISK_SIZE: d.size,
2589 constants.IDISK_VG: d.logical_id[0]}
2590 for d in instance.disks]
2591 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2592 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2594 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2595 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2596 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2597 snode_group)
2598 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2599 ignore=self.op.ignore_ipolicy)
2600 if pnode_info.group != snode_info.group:
2601 self.LogWarning("The primary and secondary nodes are in two"
2602 " different node groups; the disk parameters"
2603 " from the first disk's node group will be"
2606 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2607 # Make sure none of the nodes require exclusive storage
2608 nodes = [pnode_info]
2609 if self.op.disk_template in constants.DTS_INT_MIRROR:
2610 assert snode_info
2611 nodes.append(snode_info)
2612 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2613 if compat.any(map(has_es, nodes)):
2614 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2615 " storage is enabled" % (instance.disk_template,
2616 self.op.disk_template))
2617 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2619 def CheckPrereq(self):
2620 """Check prerequisites.
2622 This only checks the instance list against the existing names.
2624 """
2625 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2626 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2628 cluster = self.cluster = self.cfg.GetClusterInfo()
2629 assert self.instance is not None, \
2630 "Cannot retrieve locked instance %s" % self.op.instance_name
2632 pnode = instance.primary_node
2634 self.warn = []
2636 if (self.op.pnode is not None and self.op.pnode != pnode and
2637 not self.op.force):
2638 # verify that the instance is not up
2639 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2640 instance.hypervisor)
2641 if instance_info.fail_msg:
2642 self.warn.append("Can't get instance runtime information: %s" %
2643 instance_info.fail_msg)
2644 elif instance_info.payload:
2645 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2648 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2649 nodelist = list(instance.all_nodes)
2650 pnode_info = self.cfg.GetNodeInfo(pnode)
2651 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2653 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2654 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2655 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2657 # dictionary with instance information after the modification
2658 ispec = {}
2660 # Check disk modifications. This is done here and not in CheckArguments
2661 # (as with NICs), because we need to know the instance's disk template
2662 if instance.disk_template == constants.DT_EXT:
2663 self._CheckMods("disk", self.op.disks, {},
2664 self._VerifyDiskModification)
2665 else:
2666 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2667 self._VerifyDiskModification)
2669 # Prepare disk/NIC modifications
2670 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2671 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2673 # Check the validity of the `provider' parameter
2674 if instance.disk_template == constants.DT_EXT:
2675 for mod in self.diskmod:
2676 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2677 if mod[0] == constants.DDM_ADD:
2678 if ext_provider is None:
2679 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2680 " '%s' missing, during disk add" %
2682 constants.IDISK_PROVIDER),
2683 errors.ECODE_NOENT)
2684 elif mod[0] == constants.DDM_MODIFY:
2685 if ext_provider:
2686 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2688 constants.IDISK_PROVIDER,
2689 errors.ECODE_INVAL)
2690 else:
2691 for mod in self.diskmod:
2692 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2693 if ext_provider is not None:
2694 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2695 " instances of type '%s'" %
2696 (constants.IDISK_PROVIDER,
2697 constants.DT_EXT),
2698 errors.ECODE_INVAL)
2701 if self.op.os_name and not self.op.force:
2702 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2703 self.op.force_variant)
2704 instance_os = self.op.os_name
2705 else:
2706 instance_os = instance.os
2708 assert not (self.op.disk_template and self.op.disks), \
2709 "Can't modify disk template and apply disk changes at the same time"
2711 if self.op.disk_template:
2712 self._PreCheckDiskTemplate(pnode_info)
2714 # hvparams processing
2715 if self.op.hvparams:
2716 hv_type = instance.hypervisor
2717 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2718 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2719 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2721 # local check
2722 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2723 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2724 self.hv_proposed = self.hv_new = hv_new # the new actual values
2725 self.hv_inst = i_hvdict # the new dict (without defaults)
2726 else:
2727 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2728 instance.hvparams)
2729 self.hv_new = self.hv_inst = {}
2731 # beparams processing
2732 if self.op.beparams:
2733 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2734 use_none=True)
2735 objects.UpgradeBeParams(i_bedict)
2736 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2737 be_new = cluster.SimpleFillBE(i_bedict)
2738 self.be_proposed = self.be_new = be_new # the new actual values
2739 self.be_inst = i_bedict # the new dict (without defaults)
2740 else:
2741 self.be_new = self.be_inst = {}
2742 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2743 be_old = cluster.FillBE(instance)
2745 # CPU param validation -- checking every time a parameter is
2746 # changed to cover all cases where either CPU mask or vcpus have
2747 # changed
2748 if (constants.BE_VCPUS in self.be_proposed and
2749 constants.HV_CPU_MASK in self.hv_proposed):
2750 cpu_list = \
2751 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2752 # Verify mask is consistent with number of vCPUs. Can skip this
2753 # test if only 1 entry in the CPU mask, which means same mask
2754 # is applied to all vCPUs.
2755 if (len(cpu_list) > 1 and
2756 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2757 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2759 (self.be_proposed[constants.BE_VCPUS],
2760 self.hv_proposed[constants.HV_CPU_MASK]),
2761 errors.ECODE_INVAL)
2763 # Only perform this test if a new CPU mask is given
2764 if constants.HV_CPU_MASK in self.hv_new:
2765 # Calculate the largest CPU number requested
2766 max_requested_cpu = max(map(max, cpu_list))
2767 # Check that all of the instance's nodes have enough physical CPUs to
2768 # satisfy the requested CPU mask
2769 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2770 max_requested_cpu + 1, instance.hypervisor)
2772 # osparams processing
2773 if self.op.osparams:
2774 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2775 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2776 self.os_inst = i_osdict # the new dict (without defaults)
2777 else:
2778 self.os_inst = {}
2780 #TODO(dynmem): do the appropriate check involving MINMEM
2781 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2782 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2783 mem_check_list = [pnode]
2784 if be_new[constants.BE_AUTO_BALANCE]:
2785 # either we changed auto_balance to yes or it was from before
2786 mem_check_list.extend(instance.secondary_nodes)
2787 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2788 instance.hypervisor)
2789 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2790 [instance.hypervisor], False)
2791 pninfo = nodeinfo[pnode]
2792 msg = pninfo.fail_msg
2794 # Assume the primary node is unreachable and go ahead
2795 self.warn.append("Can't get info from primary node %s: %s" %
2798 (_, _, (pnhvinfo, )) = pninfo.payload
2799 if not isinstance(pnhvinfo.get("memory_free", None), int):
2800 self.warn.append("Node data from primary node %s doesn't contain"
2801 " free memory information" % pnode)
2802 elif instance_info.fail_msg:
2803 self.warn.append("Can't get instance runtime information: %s" %
2804 instance_info.fail_msg)
2805 else:
2806 if instance_info.payload:
2807 current_mem = int(instance_info.payload["memory"])
2808 else:
2809 # Assume instance not running
2810 # (there is a slight race condition here, but it's not very
2811 # probable, and we have no other way to check)
2812 # TODO: Describe race condition
2813 current_mem = 0
2814 #TODO(dynmem): do the appropriate check involving MINMEM
2815 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2816 pnhvinfo["memory_free"])
2818 raise errors.OpPrereqError("This change will prevent the instance"
2819 " from starting, due to %d MB of memory"
2820 " missing on its primary node" %
2821 miss_mem, errors.ECODE_NORES)
2823 if be_new[constants.BE_AUTO_BALANCE]:
2824 for node, nres in nodeinfo.items():
2825 if node not in instance.secondary_nodes:
2826 continue
2827 nres.Raise("Can't get info from secondary node %s" % node,
2828 prereq=True, ecode=errors.ECODE_STATE)
2829 (_, _, (nhvinfo, )) = nres.payload
2830 if not isinstance(nhvinfo.get("memory_free", None), int):
2831 raise errors.OpPrereqError("Secondary node %s didn't return free"
2832 " memory information" % node,
2834 #TODO(dynmem): do the appropriate check involving MINMEM
2835 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2836 raise errors.OpPrereqError("This change will prevent the instance"
2837 " from failover to its secondary node"
2838 " %s, due to not enough memory" % node,
2841 if self.op.runtime_mem:
2842 remote_info = self.rpc.call_instance_info(instance.primary_node,
2843 instance.name,
2844 instance.hypervisor)
2845 remote_info.Raise("Error checking node %s" % instance.primary_node)
2846 if not remote_info.payload: # not running already
2847 raise errors.OpPrereqError("Instance %s is not running" %
2848 instance.name, errors.ECODE_STATE)
2850 current_memory = remote_info.payload["memory"]
2851 if (not self.op.force and
2852 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2853 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2854 raise errors.OpPrereqError("Instance %s must have memory between %d"
2855 " and %d MB of memory unless --force is"
2858 self.be_proposed[constants.BE_MINMEM],
2859 self.be_proposed[constants.BE_MAXMEM]),
2860 errors.ECODE_INVAL)
2862 delta = self.op.runtime_mem - current_memory
2863 if delta > 0:
2864 CheckNodeFreeMemory(self, instance.primary_node,
2865 "ballooning memory for instance %s" %
2866 instance.name, delta, instance.hypervisor)
2868 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2869 raise errors.OpPrereqError("Disk operations not supported for"
2870 " diskless instances", errors.ECODE_INVAL)
2872 def _PrepareNicCreate(_, params, private):
2873 self._PrepareNicModification(params, private, None, None,
2874 None, cluster, pnode)
2875 return (None, None)
2877 def _PrepareNicMod(_, nic, params, private):
2878 self._PrepareNicModification(params, private, nic.ip, nic.network,
2879 nic.nicparams, cluster, pnode)
2880 return None
2882 def _PrepareNicRemove(_, params, __):
2883 ip = params.ip
2884 net = params.network
2885 if net is not None and ip is not None:
2886 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2888 # Verify NIC changes (operating on copy)
2889 nics = instance.nics[:]
2890 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2891 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2892 if len(nics) > constants.MAX_NICS:
2893 raise errors.OpPrereqError("Instance has too many network interfaces"
2894 " (%d), cannot add more" % constants.MAX_NICS,
2897 def _PrepareDiskMod(_, disk, params, __):
2898 disk.name = params.get(constants.IDISK_NAME, None)
2900 # Verify disk changes (operating on a copy)
2901 disks = copy.deepcopy(instance.disks)
2902 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2903 _PrepareDiskMod, None)
2904 utils.ValidateDeviceNames("disk", disks)
2905 if len(disks) > constants.MAX_DISKS:
2906 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2907 " more" % constants.MAX_DISKS,
2909 disk_sizes = [disk.size for disk in instance.disks]
2910 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2911 self.diskmod if op == constants.DDM_ADD)
2912 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2913 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2915 if self.op.offline is not None and self.op.offline:
2916 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2917 msg="can't change to offline")
2919 # Pre-compute NIC changes (necessary to use result in hooks)
2920 self._nic_chgdesc = []
2921 if self.nicmod:
2922 # Operate on copies as this is still in prereq
2923 nics = [nic.Copy() for nic in instance.nics]
2924 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2925 self._CreateNewNic, self._ApplyNicMods, None)
2926 # Verify that NIC names are unique and valid
2927 utils.ValidateDeviceNames("NIC", nics)
2928 self._new_nics = nics
2929 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2931 self._new_nics = None
2932 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2934 if not self.op.ignore_ipolicy:
2935 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2936 group_info)
2938 # Fill ispec with backend parameters
2939 ispec[constants.ISPEC_SPINDLE_USE] = \
2940 self.be_new.get(constants.BE_SPINDLE_USE, None)
2941 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2942 None)
2944 # Copy ispec to verify parameters with min/max values separately
2945 if self.op.disk_template:
2946 new_disk_template = self.op.disk_template
2948 new_disk_template = instance.disk_template
2949 ispec_max = ispec.copy()
2950 ispec_max[constants.ISPEC_MEM_SIZE] = \
2951 self.be_new.get(constants.BE_MAXMEM, None)
2952 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2953 new_disk_template)
2954 ispec_min = ispec.copy()
2955 ispec_min[constants.ISPEC_MEM_SIZE] = \
2956 self.be_new.get(constants.BE_MINMEM, None)
2957 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2958 new_disk_template)
2960 if (res_max or res_min):
2961 # FIXME: Improve error message by including information about whether
2962 # the upper or lower limit of the parameter fails the ipolicy.
2963 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2964 (group_info, group_info.name,
2965 utils.CommaJoin(set(res_max + res_min))))
2966 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2968 def _ConvertPlainToDrbd(self, feedback_fn):
2969 """Converts an instance from plain to drbd.
2972 feedback_fn("Converting template to drbd")
2973 instance = self.instance
2974 pnode = instance.primary_node
2975 snode = self.op.remote_node
2977 assert instance.disk_template == constants.DT_PLAIN
2979 # create a fake disk info for _GenerateDiskTemplate
2980 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2981 constants.IDISK_VG: d.logical_id[0],
2982 constants.IDISK_NAME: d.name}
2983 for d in instance.disks]
2984 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2985 instance.name, pnode, [snode],
2986 disk_info, None, None, 0, feedback_fn,
2987 self.diskparams)
2988 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
2989 self.diskparams)
2990 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
2991 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
2992 info = GetInstanceInfoText(instance)
2993 feedback_fn("Creating additional volumes...")
2994 # first, create the missing data and meta devices
2995 for disk in anno_disks:
2996 # unfortunately this is... not too nice
2997 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
2998 info, True, p_excl_stor)
2999 for child in disk.children:
3000 CreateSingleBlockDev(self, snode, instance, child, info, True,
3001 s_excl_stor)
3002 # at this stage, all new LVs have been created, we can rename the
3003 # old ones
3004 feedback_fn("Renaming original volumes...")
3005 rename_list = [(o, n.children[0].logical_id)
3006 for (o, n) in zip(instance.disks, new_disks)]
3007 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3008 result.Raise("Failed to rename original LVs")
3010 feedback_fn("Initializing DRBD devices...")
3011 # all child devices are in place, we can now create the DRBD devices
3012 try:
3013 for disk in anno_disks:
3014 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3015 f_create = node == pnode
3016 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3017 excl_stor)
3018 except errors.GenericError, e:
3019 feedback_fn("Initializing of DRBD devices failed;"
3020 " renaming back original volumes...")
3021 for disk in new_disks:
3022 self.cfg.SetDiskID(disk, pnode)
3023 rename_back_list = [(n.children[0], o.logical_id)
3024 for (n, o) in zip(new_disks, instance.disks)]
3025 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3026 result.Raise("Failed to rename LVs back after error %s" % str(e))
3029 # at this point, the instance has been modified
3030 instance.disk_template = constants.DT_DRBD8
3031 instance.disks = new_disks
3032 self.cfg.Update(instance, feedback_fn)
3034 # Release node locks while waiting for sync
3035 ReleaseLocks(self, locking.LEVEL_NODE)
3037 # disks are created, waiting for sync
3038 disk_abort = not WaitForSync(self, instance,
3039 oneshot=not self.op.wait_for_sync)
3040 if disk_abort:
3041 raise errors.OpExecError("There are some degraded disks for"
3042 " this instance, please cleanup manually")
3044 # Node resource locks will be released by caller
3046 def _ConvertDrbdToPlain(self, feedback_fn):
3047 """Converts an instance from drbd to plain.
3050 instance = self.instance
3052 assert len(instance.secondary_nodes) == 1
3053 assert instance.disk_template == constants.DT_DRBD8
3055 pnode = instance.primary_node
3056 snode = instance.secondary_nodes[0]
3057 feedback_fn("Converting template to plain")
3059 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3060 new_disks = [d.children[0] for d in instance.disks]
3062 # copy over size, mode and name
3063 for parent, child in zip(old_disks, new_disks):
3064 child.size = parent.size
3065 child.mode = parent.mode
3066 child.name = parent.name
3068 # this is a DRBD disk, return its port to the pool
3069 # NOTE: this must be done right before the call to cfg.Update!
3070 for disk in old_disks:
3071 tcp_port = disk.logical_id[2]
3072 self.cfg.AddTcpUdpPort(tcp_port)
3074 # update instance structure
3075 instance.disks = new_disks
3076 instance.disk_template = constants.DT_PLAIN
3077 _UpdateIvNames(0, instance.disks)
3078 self.cfg.Update(instance, feedback_fn)
3080 # Release locks in case removing disks takes a while
3081 ReleaseLocks(self, locking.LEVEL_NODE)
3083 feedback_fn("Removing volumes on the secondary node...")
3084 for disk in old_disks:
3085 self.cfg.SetDiskID(disk, snode)
3086 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3087 if msg:
3088 self.LogWarning("Could not remove block device %s on node %s,"
3089 " continuing anyway: %s", disk.iv_name, snode, msg)
3091 feedback_fn("Removing unneeded volumes on the primary node...")
3092 for idx, disk in enumerate(old_disks):
3093 meta = disk.children[1]
3094 self.cfg.SetDiskID(meta, pnode)
3095 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3096 if msg:
3097 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3098 " continuing anyway: %s", idx, pnode, msg)
3100 def _CreateNewDisk(self, idx, params, _):
3101 """Creates a new disk.
3104 instance = self.instance
3106 # add a new disk
3107 if instance.disk_template in constants.DTS_FILEBASED:
3108 (file_driver, file_path) = instance.disks[0].logical_id
3109 file_path = os.path.dirname(file_path)
3110 else:
3111 file_driver = file_path = None
3113 disk = \
3114 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3115 instance.primary_node, instance.secondary_nodes,
3116 [params], file_path, file_driver, idx,
3117 self.Log, self.diskparams)[0]
3119 new_disks = CreateDisks(self, instance, disks=[disk])
3121 if self.cluster.prealloc_wipe_disks:
3123 WipeOrCleanupDisks(self, instance,
3124 disks=[(idx, disk, 0)],
3125 cleanup=new_disks)
3127 return (disk, [
3128 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3132 def _ModifyDisk(idx, disk, params, _):
3133 """Modifies a disk.
3135 """
3136 changes = []
3137 mode = params.get(constants.IDISK_MODE, None)
3138 if mode:
3139 disk.mode = mode
3140 changes.append(("disk.mode/%d" % idx, disk.mode))
3142 name = params.get(constants.IDISK_NAME, None)
3143 disk.name = name
3144 changes.append(("disk.name/%d" % idx, disk.name))
3148 def _RemoveDisk(self, idx, root, _):
3149 """Removes a disk.
3151 """
3152 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3153 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3154 self.cfg.SetDiskID(disk, node)
3155 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3156 if msg:
3157 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3158 " continuing anyway", idx, node, msg)
3160 # if this is a DRBD disk, return its port to the pool
3161 if root.dev_type in constants.LDS_DRBD:
3162 self.cfg.AddTcpUdpPort(root.logical_id[2])
3164 def _CreateNewNic(self, idx, params, private):
3165 """Creates data structure for a new network interface.
3168 mac = params[constants.INIC_MAC]
3169 ip = params.get(constants.INIC_IP, None)
3170 net = params.get(constants.INIC_NETWORK, None)
3171 name = params.get(constants.INIC_NAME, None)
3172 net_uuid = self.cfg.LookupNetwork(net)
3173 #TODO: not private.filled?? can a nic have no nicparams??
3174 nicparams = private.filled
3175 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3176 nicparams=nicparams)
3177 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3179 return (nobj, [
3180 ("nic.%d" % idx,
3181 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3182 (mac, ip, private.filled[constants.NIC_MODE],
3183 private.filled[constants.NIC_LINK],
3184 net)),
3185 ])
3187 def _ApplyNicMods(self, idx, nic, params, private):
3188 """Modifies a network interface.
3193 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3194 if key in params:
3195 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3196 setattr(nic, key, params[key])
3198 new_net = params.get(constants.INIC_NETWORK, nic.network)
3199 new_net_uuid = self.cfg.LookupNetwork(new_net)
3200 if new_net_uuid != nic.network:
3201 changes.append(("nic.network/%d" % idx, new_net))
3202 nic.network = new_net_uuid
3204 if private.filled:
3205 nic.nicparams = private.filled
3207 for (key, val) in nic.nicparams.items():
3208 changes.append(("nic.%s/%d" % (key, idx), val))
3212 def Exec(self, feedback_fn):
3213 """Modifies an instance.
3215 All parameters take effect only at the next restart of the instance.
3217 """
3218 # Process here the warnings from CheckPrereq, as we don't have a
3219 # feedback_fn there.
3220 # TODO: Replace with self.LogWarning
3221 for warn in self.warn:
3222 feedback_fn("WARNING: %s" % warn)
3224 assert ((self.op.disk_template is None) ^
3225 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3226 "Not owning any node resource locks"
3229 instance = self.instance
3231 # New primary node
3232 if self.op.pnode:
3233 instance.primary_node = self.op.pnode
3236 if self.op.runtime_mem:
3237 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3238 instance,
3239 self.op.runtime_mem)
3240 rpcres.Raise("Cannot modify instance runtime memory")
3241 result.append(("runtime_memory", self.op.runtime_mem))
3243 # Apply disk changes
3244 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3245 self._CreateNewDisk, self._ModifyDisk,
3246 self._RemoveDisk)
3247 _UpdateIvNames(0, instance.disks)
3249 if self.op.disk_template:
3250 if __debug__:
3251 check_nodes = set(instance.all_nodes)
3252 if self.op.remote_node:
3253 check_nodes.add(self.op.remote_node)
3254 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3255 owned = self.owned_locks(level)
3256 assert not (check_nodes - owned), \
3257 ("Not owning the correct locks, owning %r, expected at least %r" %
3258 (owned, check_nodes))
3260 r_shut = ShutdownInstanceDisks(self, instance)
3261 if not r_shut:
3262 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3263 " proceed with disk template conversion")
3264 mode = (instance.disk_template, self.op.disk_template)
3265 try:
3266 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3267 except:
3268 self.cfg.ReleaseDRBDMinors(instance.name)
3269 raise
3270 result.append(("disk_template", self.op.disk_template))
3272 assert instance.disk_template == self.op.disk_template, \
3273 ("Expected disk template '%s', found '%s'" %
3274 (self.op.disk_template, instance.disk_template))
3276 # Release node and resource locks if there are any (they might already have
3277 # been released during disk conversion)
3278 ReleaseLocks(self, locking.LEVEL_NODE)
3279 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3282 if self._new_nics is not None:
3283 instance.nics = self._new_nics
3284 result.extend(self._nic_chgdesc)
3287 if self.op.hvparams:
3288 instance.hvparams = self.hv_inst
3289 for key, val in self.op.hvparams.iteritems():
3290 result.append(("hv/%s" % key, val))
3293 if self.op.beparams:
3294 instance.beparams = self.be_inst
3295 for key, val in self.op.beparams.iteritems():
3296 result.append(("be/%s" % key, val))
3300 instance.os = self.op.os_name
3303 if self.op.osparams:
3304 instance.osparams = self.os_inst
3305 for key, val in self.op.osparams.iteritems():
3306 result.append(("os/%s" % key, val))
3308 if self.op.offline is None:
3309 # Ignore
3310 pass
3311 elif self.op.offline:
3312 # Mark instance as offline
3313 self.cfg.MarkInstanceOffline(instance.name)
3314 result.append(("admin_state", constants.ADMINST_OFFLINE))
3316 # Mark instance as online, but stopped
3317 self.cfg.MarkInstanceDown(instance.name)
3318 result.append(("admin_state", constants.ADMINST_DOWN))
3320 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3322 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3323 self.owned_locks(locking.LEVEL_NODE)), \
3324 "All node locks should have been released by now"
3328 _DISK_CONVERSIONS = {
3329 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3330 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3331 }
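# Exec dispatches on the (current, requested) template pair, roughly:
#
#   mode = (instance.disk_template, self.op.disk_template)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)
#
# Only the two conversions above are supported; any other pair is rejected
# in CheckPrereq.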
3334 class LUInstanceChangeGroup(LogicalUnit):
3335 HPATH = "instance-change-group"
3336 HTYPE = constants.HTYPE_INSTANCE
3337 REQ_BGL = False
3339 def ExpandNames(self):
3340 self.share_locks = ShareAll()
3342 self.needed_locks = {
3343 locking.LEVEL_NODEGROUP: [],
3344 locking.LEVEL_NODE: [],
3345 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3346 }
3348 self._ExpandAndLockInstance()
3350 if self.op.target_groups:
3351 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3352 self.op.target_groups)
3353 else:
3354 self.req_target_uuids = None
3356 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3358 def DeclareLocks(self, level):
3359 if level == locking.LEVEL_NODEGROUP:
3360 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3362 if self.req_target_uuids:
3363 lock_groups = set(self.req_target_uuids)
3365 # Lock all groups used by instance optimistically; this requires going
3366 # via the node before it's locked, requiring verification later on
3367 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3368 lock_groups.update(instance_groups)
3369 else:
3370 # No target groups, need to lock all of them
3371 lock_groups = locking.ALL_SET
3373 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3375 elif level == locking.LEVEL_NODE:
3376 if self.req_target_uuids:
3377 # Lock all nodes used by instances
3378 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3379 self._LockInstancesNodes()
3380 else:
3381 # Lock all nodes in all potential target groups
3382 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3383 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3384 member_nodes = [node_name
3385 for group in lock_groups
3386 for node_name in self.cfg.GetNodeGroup(group).members]
3387 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3388 else:
3389 # Lock all nodes as all groups are potential targets
3390 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3392 def CheckPrereq(self):
3393 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3394 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3395 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3397 assert (self.req_target_uuids is None or
3398 owned_groups.issuperset(self.req_target_uuids))
3399 assert owned_instances == set([self.op.instance_name])
3401 # Get instance information
3402 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3404 # Check if node groups for locked instance are still correct
3405 assert owned_nodes.issuperset(self.instance.all_nodes), \
3406 ("Instance %s's nodes changed while we kept the lock" %
3407 self.op.instance_name)
3409 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3410 owned_groups)
3412 if self.req_target_uuids:
3413 # User requested specific target groups
3414 self.target_uuids = frozenset(self.req_target_uuids)
3415 else:
3416 # All groups except those used by the instance are potential targets
3417 self.target_uuids = owned_groups - inst_groups
3419 conflicting_groups = self.target_uuids & inst_groups
3420 if conflicting_groups:
3421 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3422 " used by the instance '%s'" %
3423 (utils.CommaJoin(conflicting_groups),
3424 self.op.instance_name),
3425 errors.ECODE_INVAL)
3427 if not self.target_uuids:
3428 raise errors.OpPrereqError("There are no possible target groups",
3431 def BuildHooksEnv(self):
3432 """Build hooks env.
3434 """
3435 assert self.target_uuids
3437 env = {
3438 "TARGET_GROUPS": " ".join(self.target_uuids),
3441 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3443 return env
3445 def BuildHooksNodes(self):
3446 """Build hooks nodes.
3449 mn = self.cfg.GetMasterNode()
3450 return ([mn], [mn])
3452 def Exec(self, feedback_fn):
3453 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3455 assert instances == [self.op.instance_name], "Instance not locked"
3457 req = iallocator.IAReqGroupChange(instances=instances,
3458 target_groups=list(self.target_uuids))
3459 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3461 ial.Run(self.op.iallocator)
3463 if not ial.success:
3464 raise errors.OpPrereqError("Can't compute solution for changing group of"
3465 " instance '%s' using iallocator '%s': %s" %
3466 (self.op.instance_name, self.op.iallocator,
3467 ial.info), errors.ECODE_NORES)
3469 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3471 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3472 " instance '%s'", len(jobs), self.op.instance_name)
3474 return ResultWithJobs(jobs)