# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
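

# Illustrative example (not from the original file): a request for "inst1"
# that resolves to "inst1.example.com" passes the prefix check above, while
# a request for "inst1" that resolves to "server5.example.com" raises
# OpPrereqError.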


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
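

# Illustrative example (an assumption, not from the original file): with
# cluster defaults of {"vcpus": 1, "maxmem": 128} and op.beparams of
# {"vcpus": 4}, the dict returned by SimpleFillBE keeps vcpus=4 and fills
# maxmem (and all other backend parameters) from the cluster defaults.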


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
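

# Illustrative note (an assumption, not from the original file): a NIC spec
# of {"ip": "pool", "network": "net1"} is accepted here, and the concrete
# address is only drawn from net1's pool in LUInstanceCreate.CheckPrereq,
# once the primary node is known.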


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether an instance spec meets the specs of an ipolicy.

  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
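

# Illustrative example (an assumption, not from the original file): for an
# OS named "debootstrap+default" the variant is "default"; plain
# "debootstrap" fails if the OS declares supported variants, and
# "debootstrap+foo" fails if "foo" is not among them.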


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
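
  # Typical CLI entry point for this LU (illustrative; exact flags per the
  # gnt-instance(8) man page):
  #   gnt-instance add -t drbd -n node1.example.com:node2.example.com \
  #     -o debootstrap+default --disk 0:size=10G inst1.example.com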

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template not in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)),
                                 errors.ECODE_STATE)

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration). We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
       self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)

    return export_info
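
  # Illustrative sketch of the export info layout read above (INI-style;
  # section names follow constants.INISECT_*, values are made-up examples):
  #   [export]
  #   version = 0
  #   os = debootstrap+default
  #   [instance]
  #   name = inst1.example.com
  #   disk0_size = 10240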

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]

    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]

    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]

    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]
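
  # Illustrative example (an assumption, not from the original file): if the
  # cluster default for "vcpus" is 1 and the opcode explicitly passed
  # vcpus=1, identify_defaults drops the parameter again, so the instance
  # keeps following the cluster default instead of pinning the value.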

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
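
  # Illustrative example (an assumption, not from the original file): with a
  # cluster file storage dir of /srv/ganeti/file-storage, an opcode
  # file_storage_dir of "web" and instance name "inst1.example.com", the
  # final directory is /srv/ganeti/file-storage/web/inst1.example.com.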

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node %s's nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" %
                                   self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
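
  # Typical CLI entry point for this LU (illustrative; exact flags per the
  # gnt-instance(8) man page):
  #   gnt-instance rename inst1.example.com inst2.example.com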

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
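
  # Typical CLI entry point for this LU (illustrative; exact flags per the
  # gnt-instance(8) man page):
  #   gnt-instance remove inst1.example.com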

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
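
  # Typical CLI entry point for this LU (illustrative; exact flags per the
  # gnt-instance(8) man page):
  #   gnt-instance move -n node3.example.com inst1.example.com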

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)
1719 def CheckPrereq(self):
1720 """Check prerequisites.
1722 This checks that the instance is in the cluster.
1725 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1726 assert self.instance is not None, \
1727 "Cannot retrieve locked instance %s" % self.op.instance_name
1729 if instance.disk_template not in constants.DTS_COPYABLE:
1730 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1731 instance.disk_template, errors.ECODE_STATE)
1733 node = self.cfg.GetNodeInfo(self.op.target_node)
1734 assert node is not None, \
1735 "Cannot retrieve locked node %s" % self.op.target_node
1737 self.target_node = target_node = node.name
1739 if target_node == instance.primary_node:
1740 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1741 (instance.name, target_node),
1744 bep = self.cfg.GetClusterInfo().FillBE(instance)
1746 for idx, dsk in enumerate(instance.disks):
1747 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1748 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1749 " cannot copy" % idx, errors.ECODE_STATE)
1751 CheckNodeOnline(self, target_node)
1752 CheckNodeNotDrained(self, target_node)
1753 CheckNodeVmCapable(self, target_node)
1754 cluster = self.cfg.GetClusterInfo()
1755 group_info = self.cfg.GetNodeGroup(node.group)
1756 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1757 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1758 ignore=self.op.ignore_ipolicy)
1760 if instance.admin_state == constants.ADMINST_UP:
1761 # check memory requirements on the target node
1762 CheckNodeFreeMemory(self, target_node,
1763 "failing over instance %s" %
1764 instance.name, bep[constants.BE_MAXMEM],
1765 instance.hypervisor)
1766 else:
1767 self.LogInfo("Not checking memory on the target node as"
1768 " instance will not be started")
1770 # check bridge existence
1771 CheckInstanceBridgesExist(self, instance, node=target_node)
1773 def Exec(self, feedback_fn):
1774 """Move an instance.
1776 The move is done by shutting it down on its present node, copying
1777 the data over (slow) and starting it on the new node.
1779 """
1780 instance = self.instance
1782 source_node = instance.primary_node
1783 target_node = self.target_node
1785 self.LogInfo("Shutting down instance %s on source node %s",
1786 instance.name, source_node)
1788 assert (self.owned_locks(locking.LEVEL_NODE) ==
1789 self.owned_locks(locking.LEVEL_NODE_RES))
1791 result = self.rpc.call_instance_shutdown(source_node, instance,
1792 self.op.shutdown_timeout,
1793 self.op.reason)
1794 msg = result.fail_msg
1795 if msg:
1796 if self.op.ignore_consistency:
1797 self.LogWarning("Could not shutdown instance %s on node %s."
1798 " Proceeding anyway. Please make sure node"
1799 " %s is down. Error details: %s",
1800 instance.name, source_node, source_node, msg)
1801 else:
1802 raise errors.OpExecError("Could not shutdown instance %s on"
1803 " node %s: %s" %
1804 (instance.name, source_node, msg))
1806 # create the target disks
1807 try:
1808 CreateDisks(self, instance, target_node=target_node)
1809 except errors.OpExecError:
1810 self.LogWarning("Device creation failed")
1811 self.cfg.ReleaseDRBDMinors(instance.name)
1812 raise
1814 cluster_name = self.cfg.GetClusterInfo().cluster_name
1816 errs = []
1817 # activate, get path, copy the data over
1818 for idx, disk in enumerate(instance.disks):
1819 self.LogInfo("Copying data for disk %d", idx)
1820 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1821 instance.name, True, idx)
1822 if result.fail_msg:
1823 self.LogWarning("Can't assemble newly created disk %d: %s",
1824 idx, result.fail_msg)
1825 errs.append(result.fail_msg)
1826 break
1827 dev_path, _ = result.payload
1828 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1829 target_node, dev_path,
1830 cluster_name)
1831 if result.fail_msg:
1832 self.LogWarning("Can't copy data over for disk %d: %s",
1833 idx, result.fail_msg)
1834 errs.append(result.fail_msg)
1835 break
1837 if errs:
1838 self.LogWarning("Some disks failed to copy, aborting")
1839 try:
1840 RemoveDisks(self, instance, target_node=target_node)
1841 finally:
1842 self.cfg.ReleaseDRBDMinors(instance.name)
1843 raise errors.OpExecError("Errors during disk copy: %s" %
1844 (",".join(errs),))
1846 instance.primary_node = target_node
1847 self.cfg.Update(instance, feedback_fn)
1849 self.LogInfo("Removing the disks on the original node")
1850 RemoveDisks(self, instance, target_node=source_node)
1852 # Only start the instance if it's marked as up
1853 if instance.admin_state == constants.ADMINST_UP:
1854 self.LogInfo("Starting instance %s on node %s",
1855 instance.name, target_node)
1857 disks_ok, _ = AssembleInstanceDisks(self, instance,
1858 ignore_secondaries=True)
1859 if not disks_ok:
1860 ShutdownInstanceDisks(self, instance)
1861 raise errors.OpExecError("Can't activate the instance's disks")
1863 result = self.rpc.call_instance_start(target_node,
1864 (instance, None, None), False,
1865 self.op.reason)
1866 msg = result.fail_msg
1867 if msg:
1868 ShutdownInstanceDisks(self, instance)
1869 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1870 (instance.name, target_node, msg))
1873 class LUInstanceMultiAlloc(NoHooksLU):
1874 """Allocates multiple instances at the same time.
1876 """
1878 REQ_BGL = False
1879 def CheckArguments(self):
1880 """Check arguments.
1882 """
1883 nodes = []
1884 for inst in self.op.instances:
1885 if inst.iallocator is not None:
1886 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1887 " instance objects", errors.ECODE_INVAL)
1888 nodes.append(bool(inst.pnode))
1889 if inst.disk_template in constants.DTS_INT_MIRROR:
1890 nodes.append(bool(inst.snode))
1892 has_nodes = compat.any(nodes)
1893 if compat.all(nodes) ^ has_nodes:
1894 raise errors.OpPrereqError("There are instance objects providing"
1895 " pnode/snode while others do not",
1896 errors.ECODE_INVAL)
1898 if not has_nodes and self.op.iallocator is None:
1899 default_iallocator = self.cfg.GetDefaultIAllocator()
1900 if default_iallocator:
1901 self.op.iallocator = default_iallocator
1902 else:
1903 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1904 " given and no cluster-wide default"
1905 " iallocator found; please specify either"
1906 " an iallocator or nodes on the instances"
1907 " or set a cluster-wide default iallocator",
1908 errors.ECODE_INVAL)
1910 _CheckOpportunisticLocking(self.op)
1912 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1913 if dups:
1914 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1915 utils.CommaJoin(dups), errors.ECODE_INVAL)
1917 def ExpandNames(self):
1918 """Calculate the locks.
1920 """
1921 self.share_locks = ShareAll()
1922 self.needed_locks = {
1923 # iallocator will select nodes and even if no iallocator is used,
1924 # collisions with LUInstanceCreate should be avoided
1925 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1926 }
1928 if self.op.iallocator:
1929 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1930 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1932 if self.op.opportunistic_locking:
1933 self.opportunistic_locks[locking.LEVEL_NODE] = True
1934 else:
1935 nodeslist = []
1936 for inst in self.op.instances:
1937 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1938 nodeslist.append(inst.pnode)
1939 if inst.snode is not None:
1940 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1941 nodeslist.append(inst.snode)
1943 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1944 # Lock resources of instance's primary and secondary nodes (copy to
1945 # prevent accidental modification)
1946 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1948 def DeclareLocks(self, level):
1949 if level == locking.LEVEL_NODE_RES and \
1950 self.opportunistic_locks[locking.LEVEL_NODE]:
1951 # Even when using opportunistic locking, we require the same set of
1952 # NODE_RES locks as we got NODE locks
1953 self.needed_locks[locking.LEVEL_NODE_RES] = \
1954 self.owned_locks(locking.LEVEL_NODE)
1956 def CheckPrereq(self):
1957 """Check prerequisites.
1959 """
1960 if self.op.iallocator:
1961 cluster = self.cfg.GetClusterInfo()
1962 default_vg = self.cfg.GetVGName()
1963 ec_id = self.proc.GetECId()
1965 if self.op.opportunistic_locking:
1966 # Only consider nodes for which a lock is held
1967 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1968 else:
1969 node_whitelist = None
1971 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1972 _ComputeNics(op, cluster, None,
1973 self.cfg, ec_id),
1974 _ComputeFullBeParams(op, cluster),
1975 node_whitelist)
1976 for op in self.op.instances]
1978 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1979 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1981 ial.Run(self.op.iallocator)
1983 if not ial.success:
1984 raise errors.OpPrereqError("Can't compute nodes using"
1985 " iallocator '%s': %s" %
1986 (self.op.iallocator, ial.info),
1987 errors.ECODE_NORES)
1989 self.ia_result = ial.result
1991 if self.op.dry_run:
1992 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1993 constants.JOB_IDS_KEY: [],
1994 })
1996 def _ConstructPartialResult(self):
1997 """Constructs the partial result.
1999 """
2000 if self.op.iallocator:
2001 (allocatable, failed_insts) = self.ia_result
2002 allocatable_insts = map(compat.fst, allocatable)
2003 else:
2004 allocatable_insts = [op.instance_name for op in self.op.instances]
2005 failed_insts = []
2007 return {
2008 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
2009 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
2010 }
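# A minimal sketch (not part of the original module) of the dictionary shape
# produced by _ConstructPartialResult; the instance names are hypothetical:
#
#   {
#     opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1.example.com"],
#     opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst2.example.com"],
#   }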
2012 def Exec(self, feedback_fn):
2013 """Executes the opcode.
2015 """
2016 jobs = []
2017 if self.op.iallocator:
2018 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2019 (allocatable, failed) = self.ia_result
2021 for (name, nodes) in allocatable:
2022 op = op2inst.pop(name)
2024 if len(nodes) > 1:
2025 (op.pnode, op.snode) = nodes
2026 else:
2027 (op.pnode,) = nodes
2029 jobs.append([op])
2031 missing = set(op2inst.keys()) - set(failed)
2032 assert not missing, \
2033 "Iallocator returned an incomplete result: %s" % \
2034 utils.CommaJoin(missing)
2035 else:
2036 jobs.extend([op] for op in self.op.instances)
2038 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2041 class _InstNicModPrivate:
2042 """Data structure for network interface modifications.
2044 Used by L{LUInstanceSetParams}.
2046 """
2047 def __init__(self):
2048 self.params = None
2049 self.filled = None
2052 def _PrepareContainerMods(mods, private_fn):
2053 """Prepares a list of container modifications by adding a private data field.
2055 @type mods: list of tuples; (operation, index, parameters)
2056 @param mods: List of modifications
2057 @type private_fn: callable or None
2058 @param private_fn: Callable for constructing a private data field for a
2059 modification
2060 @rtype: list
2062 """
2063 if private_fn is None:
2064 fn = lambda: None
2065 else:
2066 fn = private_fn
2068 return [(op, idx, params, fn()) for (op, idx, params) in mods]
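# Illustrative sketch, not part of the original module: _PrepareContainerMods
# turns three-element modification tuples into four-element ones by appending
# a fresh private-data slot per modification. The values are hypothetical:
#
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}),
#           (constants.DDM_MODIFY, 0, {constants.IDISK_MODE: constants.DISK_RDWR})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(op, idx, params, <_InstNicModPrivate instance>), ...]
#   # With private_fn=None the fourth element is simply None.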
2071 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2072 """Checks if nodes have enough physical CPUs
2074 This function checks if all given nodes have the needed number of
2075 physical CPUs. In case any node has less CPUs or we cannot get the
2076 information from the node, this function raises an OpPrereqError
2079 @type lu: C{LogicalUnit}
2080 @param lu: a logical unit from which we get configuration data
2081 @type nodenames: C{list}
2082 @param nodenames: the list of node names to check
2083 @type requested: C{int}
2084 @param requested: the minimum acceptable number of physical CPUs
2085 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2086 or we cannot check the node
2088 """
2089 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2090 for node in nodenames:
2091 info = nodeinfo[node]
2092 info.Raise("Cannot get current information from node %s" % node,
2093 prereq=True, ecode=errors.ECODE_ENVIRON)
2094 (_, _, (hv_info, )) = info.payload
2095 num_cpus = hv_info.get("cpu_total", None)
2096 if not isinstance(num_cpus, int):
2097 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2098 " on node %s, result was '%s'" %
2099 (node, num_cpus), errors.ECODE_ENVIRON)
2100 if requested > num_cpus:
2101 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2102 "required" % (node, num_cpus, requested),
2103 errors.ECODE_NORES)
2106 def GetItemFromContainer(identifier, kind, container):
2107 """Return the item referred to by the identifier.
2109 @type identifier: string
2110 @param identifier: Item index or name or UUID
2111 @type kind: string
2112 @param kind: One-word item description
2113 @type container: list
2114 @param container: Container to get the item from
2116 """
2117 # Index
2118 try:
2119 idx = int(identifier)
2120 if idx == -1:
2121 # Append
2122 absidx = len(container) - 1
2123 elif idx < 0:
2124 raise IndexError("Not accepting negative indices other than -1")
2125 elif idx > len(container):
2126 raise IndexError("Got %s index %s, but there are only %s" %
2127 (kind, idx, len(container)))
2128 else:
2129 absidx = idx
2130 return (absidx, container[idx])
2131 except ValueError:
2132 pass
2134 for idx, item in enumerate(container):
2135 if item.uuid == identifier or item.name == identifier:
2136 return (idx, item)
2138 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2139 (kind, identifier), errors.ECODE_NOENT)
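# Illustrative usage sketch (hypothetical container, not from the original
# module): the identifier may be a numeric index ("0", "-1") or a name/UUID.
#
#   (idx, nic) = GetItemFromContainer("-1", "nic", instance.nics)   # last item
#   (idx, nic) = GetItemFromContainer("eth0", "nic", instance.nics) # by name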
2142 def _ApplyContainerMods(kind, container, chgdesc, mods,
2143 create_fn, modify_fn, remove_fn):
2144 """Applies descriptions in C{mods} to C{container}.
2146 @type kind: string
2147 @param kind: One-word item description
2148 @type container: list
2149 @param container: Container to modify
2150 @type chgdesc: None or list
2151 @param chgdesc: List of applied changes
2152 @type mods: list
2153 @param mods: Modifications as returned by L{_PrepareContainerMods}
2154 @type create_fn: callable
2155 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2156 receives absolute item index, parameters and private data object as added
2157 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2158 as list
2159 @type modify_fn: callable
2160 @param modify_fn: Callback for modifying an existing item
2161 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2162 and private data object as added by L{_PrepareContainerMods}, returns
2163 changes as list
2164 @type remove_fn: callable
2165 @param remove_fn: Callback on removing item; receives absolute item index,
2166 item and private data object as added by L{_PrepareContainerMods}
2168 """
2169 for (op, identifier, params, private) in mods:
2170 changes = None
2172 if op == constants.DDM_ADD:
2173 # Calculate where item will be added
2174 # When adding an item, identifier can only be an index
2175 try:
2176 idx = int(identifier)
2177 except ValueError:
2178 raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
2179 " identifier for %s" % constants.DDM_ADD,
2180 errors.ECODE_INVAL)
2181 if idx == -1:
2182 addidx = len(container)
2183 else:
2184 if idx < 0:
2185 raise IndexError("Not accepting negative indices other than -1")
2186 elif idx > len(container):
2187 raise IndexError("Got %s index %s, but there are only %s" %
2188 (kind, idx, len(container)))
2189 addidx = idx
2191 if create_fn is None:
2192 item = params
2193 else:
2194 (item, changes) = create_fn(addidx, params, private)
2196 if idx == -1:
2197 container.append(item)
2198 else:
2200 assert idx <= len(container)
2201 # list.insert does so before the specified index
2202 container.insert(idx, item)
2203 else:
2204 # Retrieve existing item
2205 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2207 if op == constants.DDM_REMOVE:
2208 assert not params
2210 changes = [("%s/%s" % (kind, absidx), "remove")]
2212 if remove_fn is not None:
2213 msg = remove_fn(absidx, item, private)
2214 if msg:
2215 changes.append(("%s/%s" % (kind, absidx), msg))
2217 assert container[absidx] == item
2218 del container[absidx]
2219 elif op == constants.DDM_MODIFY:
2220 if modify_fn is not None:
2221 changes = modify_fn(absidx, item, params, private)
2222 else:
2223 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2225 assert _TApplyContModsCbChanges(changes)
2227 if not (chgdesc is None or changes is None):
2228 chgdesc.extend(changes)
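# A minimal sketch of how the callbacks cooperate (hypothetical values, not
# part of the original module). Each callback receives the private slot added
# by _PrepareContainerMods and returns a list of (path, change) descriptions:
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_MODIFY, 0, {"mode": "ro"})],
#                                None)
#   _ApplyContainerMods("disk", disks, chgdesc, mods,
#                       None, lambda idx, item, params, private: [], None)
#   # chgdesc now holds the accumulated change tuples, e.g. ("disk/0", ...)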
2231 def _UpdateIvNames(base_index, disks):
2232 """Updates the C{iv_name} attribute of disks.
2234 @type disks: list of L{objects.Disk}
2236 """
2237 for (idx, disk) in enumerate(disks):
2238 disk.iv_name = "disk/%s" % (base_index + idx, )
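# Example (hypothetical): after inserting a disk at index 1, calling
# _UpdateIvNames(1, instance.disks[1:]) rewrites the iv_name attributes to
# "disk/1", "disk/2", ... so they stay consecutive. Sketch, not original code.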
2241 class LUInstanceSetParams(LogicalUnit):
2242 """Modifies an instance's parameters.
2244 """
2245 HPATH = "instance-modify"
2246 HTYPE = constants.HTYPE_INSTANCE
2247 REQ_BGL = False
2249 @staticmethod
2250 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2251 assert ht.TList(mods)
2252 assert not mods or len(mods[0]) in (2, 3)
2254 if mods and len(mods[0]) == 2:
2255 result = []
2257 addremove = 0
2258 for op, params in mods:
2259 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2260 result.append((op, -1, params))
2261 addremove += 1
2263 if addremove > 1:
2264 raise errors.OpPrereqError("Only one %s add or remove operation is"
2265 " supported at a time" % kind,
2266 errors.ECODE_INVAL)
2267 else:
2268 result.append((constants.DDM_MODIFY, op, params))
2270 assert verify_fn(result)
2271 else:
2272 result = mods
2274 return result
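# Illustrative sketch (hypothetical values): legacy two-element modifications
# are upgraded to the three-element format used internally, e.g.
#
#   [(constants.DDM_ADD, {...})]  -> [(constants.DDM_ADD, -1, {...})]
#   [("0", {...})]                -> [(constants.DDM_MODIFY, "0", {...})]
#
# Already-upgraded three-element lists are returned unchanged.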
2277 def _CheckMods(kind, mods, key_types, item_fn):
2278 """Ensures requested disk/NIC modifications are valid.
2280 """
2281 for (op, _, params) in mods:
2282 assert ht.TDict(params)
2284 # If 'key_types' is an empty dict, we assume we have an
2285 # 'ext' template and thus do not ForceDictType
2286 if key_types:
2287 utils.ForceDictType(params, key_types)
2289 if op == constants.DDM_REMOVE:
2290 if params:
2291 raise errors.OpPrereqError("No settings should be passed when"
2292 " removing a %s" % kind,
2293 errors.ECODE_INVAL)
2294 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2295 item_fn(op, params)
2296 else:
2297 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2299 def _VerifyDiskModification(self, op, params):
2300 """Verifies a disk modification.
2302 """
2303 if op == constants.DDM_ADD:
2304 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2305 if mode not in constants.DISK_ACCESS_SET:
2306 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2307 errors.ECODE_INVAL)
2309 size = params.get(constants.IDISK_SIZE, None)
2310 if size is None:
2311 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2312 constants.IDISK_SIZE, errors.ECODE_INVAL)
2314 try:
2315 size = int(size)
2316 except (TypeError, ValueError), err:
2317 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2318 errors.ECODE_INVAL)
2320 params[constants.IDISK_SIZE] = size
2321 name = params.get(constants.IDISK_NAME, None)
2322 if name is not None and name.lower() == constants.VALUE_NONE:
2323 params[constants.IDISK_NAME] = None
2325 elif op == constants.DDM_MODIFY:
2326 if constants.IDISK_SIZE in params:
2327 raise errors.OpPrereqError("Disk size change not possible, use"
2328 " grow-disk", errors.ECODE_INVAL)
2330 # Disk modification supports changing only the disk name and mode.
2331 # Changing arbitrary parameters is allowed only for the ext disk template
2332 if self.instance.disk_template != constants.DT_EXT:
2333 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2335 name = params.get(constants.IDISK_NAME, None)
2336 if name is not None and name.lower() == constants.VALUE_NONE:
2337 params[constants.IDISK_NAME] = None
2340 def _VerifyNicModification(op, params):
2341 """Verifies a network interface modification.
2343 """
2344 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2345 ip = params.get(constants.INIC_IP, None)
2346 name = params.get(constants.INIC_NAME, None)
2347 req_net = params.get(constants.INIC_NETWORK, None)
2348 link = params.get(constants.NIC_LINK, None)
2349 mode = params.get(constants.NIC_MODE, None)
2350 if name is not None and name.lower() == constants.VALUE_NONE:
2351 params[constants.INIC_NAME] = None
2352 if req_net is not None:
2353 if req_net.lower() == constants.VALUE_NONE:
2354 params[constants.INIC_NETWORK] = None
2355 req_net = None
2356 elif link is not None or mode is not None:
2357 raise errors.OpPrereqError("If a network is given, neither mode nor"
2358 " link should be set",
2359 errors.ECODE_INVAL)
2361 if op == constants.DDM_ADD:
2362 macaddr = params.get(constants.INIC_MAC, None)
2363 if macaddr is None:
2364 params[constants.INIC_MAC] = constants.VALUE_AUTO
2366 if ip is not None:
2367 if ip.lower() == constants.VALUE_NONE:
2368 params[constants.INIC_IP] = None
2369 else:
2370 if ip.lower() == constants.NIC_IP_POOL:
2371 if op == constants.DDM_ADD and req_net is None:
2372 raise errors.OpPrereqError("If ip=pool, parameter network"
2373 " must be passed too",
2374 errors.ECODE_INVAL)
2375 else:
2376 if not netutils.IPAddress.IsValid(ip):
2377 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2378 errors.ECODE_INVAL)
2380 if constants.INIC_MAC in params:
2381 macaddr = params[constants.INIC_MAC]
2382 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2383 macaddr = utils.NormalizeAndValidateMac(macaddr)
2385 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2386 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2387 " modifying an existing NIC",
2388 errors.ECODE_INVAL)
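# A hedged example of parameters that pass this verification (hypothetical
# values, not part of the original module):
#
#   params = {constants.INIC_IP: "pool",            # allocate from a network
#             constants.INIC_NETWORK: "mynet",      # hence network is required
#             constants.INIC_MAC: constants.VALUE_AUTO}
#   LUInstanceSetParams._VerifyNicModification(constants.DDM_ADD, params)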
2390 def CheckArguments(self):
2391 if not (self.op.nics or self.op.disks or self.op.disk_template or
2392 self.op.hvparams or self.op.beparams or self.op.os_name or
2393 self.op.osparams or self.op.offline is not None or
2394 self.op.runtime_mem or self.op.pnode):
2395 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2397 if self.op.hvparams:
2398 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2399 "hypervisor", "instance", "cluster")
2401 self.op.disks = self._UpgradeDiskNicMods(
2402 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2403 self.op.nics = self._UpgradeDiskNicMods(
2404 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2406 if self.op.disks and self.op.disk_template is not None:
2407 raise errors.OpPrereqError("Disk template conversion and other disk"
2408 " changes not supported at the same time",
2409 errors.ECODE_INVAL)
2411 if (self.op.disk_template and
2412 self.op.disk_template in constants.DTS_INT_MIRROR and
2413 self.op.remote_node is None):
2414 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2415 " one requires specifying a secondary node",
2416 errors.ECODE_INVAL)
2418 # Check NIC modifications
2419 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2420 self._VerifyNicModification)
2422 if self.op.pnode:
2423 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2425 def ExpandNames(self):
2426 self._ExpandAndLockInstance()
2427 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2428 # Can't even acquire node locks in shared mode as upcoming changes in
2429 # Ganeti 2.6 will start to modify the node object on disk conversion
2430 self.needed_locks[locking.LEVEL_NODE] = []
2431 self.needed_locks[locking.LEVEL_NODE_RES] = []
2432 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2433 # Lock the node group so we can look up the ipolicy
2434 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2436 def DeclareLocks(self, level):
2437 if level == locking.LEVEL_NODEGROUP:
2438 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2439 # Acquire locks for the instance's nodegroups optimistically. Needs
2440 # to be verified in CheckPrereq
2441 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2442 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2443 elif level == locking.LEVEL_NODE:
2444 self._LockInstancesNodes()
2445 if self.op.disk_template and self.op.remote_node:
2446 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2447 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2448 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2450 self.needed_locks[locking.LEVEL_NODE_RES] = \
2451 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2453 def BuildHooksEnv(self):
2454 """Build hooks env.
2456 This runs on the master, primary and secondaries.
2458 """
2459 args = {}
2460 if constants.BE_MINMEM in self.be_new:
2461 args["minmem"] = self.be_new[constants.BE_MINMEM]
2462 if constants.BE_MAXMEM in self.be_new:
2463 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2464 if constants.BE_VCPUS in self.be_new:
2465 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2466 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2467 # information at all.
2469 if self._new_nics is not None:
2470 nics = []
2472 for nic in self._new_nics:
2473 n = copy.deepcopy(nic)
2474 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2475 n.nicparams = nicparams
2476 nics.append(NICToTuple(self, n))
2478 args["nics"] = nics
2480 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2481 if self.op.disk_template:
2482 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2483 if self.op.runtime_mem:
2484 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2486 return env
2488 def BuildHooksNodes(self):
2489 """Build hooks nodes.
2491 """
2492 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2493 return (nl, nl)
2495 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2496 old_params, cluster, pnode):
2498 update_params_dict = dict([(key, params[key])
2499 for key in constants.NICS_PARAMETERS
2500 if key in params])
2502 req_link = update_params_dict.get(constants.NIC_LINK, None)
2503 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2505 new_net_uuid = None
2506 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2507 if new_net_uuid_or_name:
2508 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2509 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2511 if old_net_uuid:
2512 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2514 if new_net_uuid:
2515 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2516 if not netparams:
2517 raise errors.OpPrereqError("No netparams found for the network"
2518 " %s, probably not connected" %
2519 new_net_obj.name, errors.ECODE_INVAL)
2520 new_params = dict(netparams)
2521 else:
2522 new_params = GetUpdatedParams(old_params, update_params_dict)
2524 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2526 new_filled_params = cluster.SimpleFillNIC(new_params)
2527 objects.NIC.CheckParameterSyntax(new_filled_params)
2529 new_mode = new_filled_params[constants.NIC_MODE]
2530 if new_mode == constants.NIC_MODE_BRIDGED:
2531 bridge = new_filled_params[constants.NIC_LINK]
2532 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2533 if msg:
2534 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2535 if self.op.force:
2536 self.warn.append(msg)
2537 else:
2538 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2540 elif new_mode == constants.NIC_MODE_ROUTED:
2541 ip = params.get(constants.INIC_IP, old_ip)
2542 if ip is None:
2543 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2544 " on a routed NIC", errors.ECODE_INVAL)
2546 elif new_mode == constants.NIC_MODE_OVS:
2547 # TODO: check OVS link
2548 self.LogInfo("OVS links are currently not checked for correctness")
2550 if constants.INIC_MAC in params:
2551 mac = params[constants.INIC_MAC]
2552 if mac is None:
2553 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2554 errors.ECODE_INVAL)
2555 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2556 # otherwise generate the MAC address
2557 params[constants.INIC_MAC] = \
2558 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2559 else:
2560 # or validate/reserve the current one
2561 try:
2562 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2563 except errors.ReservationError:
2564 raise errors.OpPrereqError("MAC address '%s' already in use"
2565 " in cluster" % mac,
2566 errors.ECODE_NOTUNIQUE)
2567 elif new_net_uuid != old_net_uuid:
2569 def get_net_prefix(net_uuid):
2570 mac_prefix = None
2571 if net_uuid:
2572 nobj = self.cfg.GetNetwork(net_uuid)
2573 mac_prefix = nobj.mac_prefix
2575 return mac_prefix
2577 new_prefix = get_net_prefix(new_net_uuid)
2578 old_prefix = get_net_prefix(old_net_uuid)
2579 if old_prefix != new_prefix:
2580 params[constants.INIC_MAC] = \
2581 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2583 # if there is a change in (ip, network) tuple
2584 new_ip = params.get(constants.INIC_IP, old_ip)
2585 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2587 # if IP is pool then require a network and generate one IP
2588 if new_ip.lower() == constants.NIC_IP_POOL:
2589 if new_net_uuid:
2590 try:
2591 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2592 except errors.ReservationError:
2593 raise errors.OpPrereqError("Unable to get a free IP"
2594 " from the address pool",
2595 errors.ECODE_STATE)
2596 self.LogInfo("Chose IP %s from network %s",
2597 new_ip,
2598 new_net_obj.name)
2599 params[constants.INIC_IP] = new_ip
2600 else:
2601 raise errors.OpPrereqError("ip=pool, but no network found",
2602 errors.ECODE_INVAL)
2603 # Reserve new IP in the new network, if any
2604 elif new_net_uuid:
2605 try:
2606 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2607 check=self.op.conflicts_check)
2608 self.LogInfo("Reserving IP %s in network %s",
2609 new_ip, new_net_obj.name)
2610 except errors.ReservationError:
2611 raise errors.OpPrereqError("IP %s not available in network %s" %
2612 (new_ip, new_net_obj.name),
2613 errors.ECODE_NOTUNIQUE)
2614 # new network is None so check if new IP is a conflicting IP
2615 elif self.op.conflicts_check:
2616 _CheckForConflictingIp(self, new_ip, pnode)
2618 # release old IP if old network is not None
2619 if old_ip and old_net_uuid:
2620 try:
2621 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2622 except errors.AddressPoolError:
2623 logging.warning("Release IP %s not contained in network %s",
2624 old_ip, old_net_obj.name)
2626 # there are no changes in (ip, network) tuple and old network is not None
2627 elif (old_net_uuid is not None and
2628 (req_link is not None or req_mode is not None)):
2629 raise errors.OpPrereqError("Not allowed to change link or mode of"
2630 " a NIC that is connected to a network",
2631 errors.ECODE_INVAL)
2633 private.params = new_params
2634 private.filled = new_filled_params
2636 def _PreCheckDiskTemplate(self, pnode_info):
2637 """CheckPrereq checks related to a new disk template."""
2638 # Arguments are passed to avoid configuration lookups
2639 instance = self.instance
2640 pnode = instance.primary_node
2641 cluster = self.cluster
2642 if instance.disk_template == self.op.disk_template:
2643 raise errors.OpPrereqError("Instance already has disk template %s" %
2644 instance.disk_template, errors.ECODE_INVAL)
2646 if (instance.disk_template,
2647 self.op.disk_template) not in self._DISK_CONVERSIONS:
2648 raise errors.OpPrereqError("Unsupported disk template conversion from"
2649 " %s to %s" % (instance.disk_template,
2650 self.op.disk_template),
2651 errors.ECODE_INVAL)
2652 CheckInstanceState(self, instance, INSTANCE_DOWN,
2653 msg="cannot change disk template")
2654 if self.op.disk_template in constants.DTS_INT_MIRROR:
2655 if self.op.remote_node == pnode:
2656 raise errors.OpPrereqError("Given new secondary node %s is the same"
2657 " as the primary node of the instance" %
2658 self.op.remote_node, errors.ECODE_STATE)
2659 CheckNodeOnline(self, self.op.remote_node)
2660 CheckNodeNotDrained(self, self.op.remote_node)
2661 # FIXME: here we assume that the old instance type is DT_PLAIN
2662 assert instance.disk_template == constants.DT_PLAIN
2663 disks = [{constants.IDISK_SIZE: d.size,
2664 constants.IDISK_VG: d.logical_id[0]}
2665 for d in instance.disks]
2666 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2667 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2669 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2670 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2671 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2672 snode_group)
2673 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2674 ignore=self.op.ignore_ipolicy)
2675 if pnode_info.group != snode_info.group:
2676 self.LogWarning("The primary and secondary nodes are in two"
2677 " different node groups; the disk parameters"
2678 " from the first disk's node group will be"
2679 " used")
2681 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2682 # Make sure none of the nodes require exclusive storage
2683 nodes = [pnode_info]
2684 if self.op.disk_template in constants.DTS_INT_MIRROR:
2685 assert snode_info
2686 nodes.append(snode_info)
2687 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2688 if compat.any(map(has_es, nodes)):
2689 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2690 " storage is enabled" % (instance.disk_template,
2691 self.op.disk_template))
2692 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2694 def CheckPrereq(self):
2695 """Check prerequisites.
2697 This only checks the instance list against the existing names.
2699 """
2700 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2701 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2703 cluster = self.cluster = self.cfg.GetClusterInfo()
2704 assert self.instance is not None, \
2705 "Cannot retrieve locked instance %s" % self.op.instance_name
2707 pnode = instance.primary_node
2709 self.warn = []
2711 if (self.op.pnode is not None and self.op.pnode != pnode and
2712 not self.op.force):
2713 # verify that the instance is not up
2714 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2715 instance.hypervisor)
2716 if instance_info.fail_msg:
2717 self.warn.append("Can't get instance runtime information: %s" %
2718 instance_info.fail_msg)
2719 elif instance_info.payload:
2720 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2721 errors.ECODE_STATE)
2723 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2724 nodelist = list(instance.all_nodes)
2725 pnode_info = self.cfg.GetNodeInfo(pnode)
2726 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2728 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2729 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2730 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2732 # dictionary with instance information after the modification
2733 ispec = {}
2735 # Check disk modifications. This is done here and not in CheckArguments
2736 # (as with NICs), because we need to know the instance's disk template
2737 if instance.disk_template == constants.DT_EXT:
2738 self._CheckMods("disk", self.op.disks, {},
2739 self._VerifyDiskModification)
2740 else:
2741 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2742 self._VerifyDiskModification)
2744 # Prepare disk/NIC modifications
2745 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2746 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2748 # Check the validity of the `provider' parameter
2749 if instance.disk_template == constants.DT_EXT:
2750 for mod in self.diskmod:
2751 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2752 if mod[0] == constants.DDM_ADD:
2753 if ext_provider is None:
2754 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2755 " '%s' missing, during disk add" %
2756 (constants.DT_EXT,
2757 constants.IDISK_PROVIDER),
2758 errors.ECODE_NOENT)
2759 elif mod[0] == constants.DDM_MODIFY:
2760 if ext_provider:
2761 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2762 " modification" %
2763 constants.IDISK_PROVIDER,
2764 errors.ECODE_INVAL)
2765 else:
2766 for mod in self.diskmod:
2767 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2768 if ext_provider is not None:
2769 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2770 " instances of type '%s'" %
2771 (constants.IDISK_PROVIDER,
2772 constants.DT_EXT),
2773 errors.ECODE_INVAL)
2775 if self.op.hotplug:
2776 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2777 self.instance)
2778 result.Raise("Hotplug is not supported.")
2781 if self.op.os_name and not self.op.force:
2782 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2783 self.op.force_variant)
2784 instance_os = self.op.os_name
2785 else:
2786 instance_os = instance.os
2788 assert not (self.op.disk_template and self.op.disks), \
2789 "Can't modify disk template and apply disk changes at the same time"
2791 if self.op.disk_template:
2792 self._PreCheckDiskTemplate(pnode_info)
2794 # hvparams processing
2795 if self.op.hvparams:
2796 hv_type = instance.hypervisor
2797 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2798 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2799 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2802 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2803 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2804 self.hv_proposed = self.hv_new = hv_new # the new actual values
2805 self.hv_inst = i_hvdict # the new dict (without defaults)
2806 else:
2807 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2808 instance.hvparams)
2809 self.hv_new = self.hv_inst = {}
2811 # beparams processing
2812 if self.op.beparams:
2813 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2814 use_none=True)
2815 objects.UpgradeBeParams(i_bedict)
2816 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2817 be_new = cluster.SimpleFillBE(i_bedict)
2818 self.be_proposed = self.be_new = be_new # the new actual values
2819 self.be_proposed = self.be_new = be_new # the new actual values
2820 self.be_inst = i_bedict # the new dict (without defaults)
2821 else:
2822 self.be_new = self.be_inst = {}
2822 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2823 be_old = cluster.FillBE(instance)
2825 # CPU param validation -- checking every time a parameter is
2826 # changed to cover all cases where either CPU mask or vcpus have
2827 # been changed
2828 if (constants.BE_VCPUS in self.be_proposed and
2829 constants.HV_CPU_MASK in self.hv_proposed):
2830 cpu_list = \
2831 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2832 # Verify mask is consistent with number of vCPUs. Can skip this
2833 # test if only 1 entry in the CPU mask, which means same mask
2834 # is applied to all vCPUs.
2835 if (len(cpu_list) > 1 and
2836 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2837 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2838 " CPU mask [%s]" %
2839 (self.be_proposed[constants.BE_VCPUS],
2840 self.hv_proposed[constants.HV_CPU_MASK]),
2841 errors.ECODE_INVAL)
2843 # Only perform this test if a new CPU mask is given
2844 if constants.HV_CPU_MASK in self.hv_new:
2845 # Calculate the largest CPU number requested
2846 max_requested_cpu = max(map(max, cpu_list))
2847 # Check that all of the instance's nodes have enough physical CPUs to
2848 # satisfy the requested CPU mask
2849 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2850 max_requested_cpu + 1, instance.hypervisor)
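# Worked example (hypothetical): with a proposed CPU mask of "1-2:4:0,3",
# utils.ParseMultiCpuMask yields one entry per vCPU ([[1, 2], [4], [0, 3]]),
# so BE_VCPUS must be 3, and the largest CPU number requested is 4, meaning
# every node needs at least 5 physical CPUs. Sketch, not original code.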
2852 # osparams processing
2853 if self.op.osparams:
2854 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2855 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2856 self.os_inst = i_osdict # the new dict (without defaults)
2857 else:
2858 self.os_inst = {}
2860 #TODO(dynmem): do the appropriate check involving MINMEM
2861 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2862 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2863 mem_check_list = [pnode]
2864 if be_new[constants.BE_AUTO_BALANCE]:
2865 # either we changed auto_balance to yes or it was from before
2866 mem_check_list.extend(instance.secondary_nodes)
2867 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2868 instance.hypervisor)
2869 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2870 [instance.hypervisor], False)
2871 pninfo = nodeinfo[pnode]
2872 msg = pninfo.fail_msg
2873 if msg:
2874 # Assume the primary node is unreachable and go ahead
2875 self.warn.append("Can't get info from primary node %s: %s" %
2876 (pnode, msg))
2877 else:
2878 (_, _, (pnhvinfo, )) = pninfo.payload
2879 if not isinstance(pnhvinfo.get("memory_free", None), int):
2880 self.warn.append("Node data from primary node %s doesn't contain"
2881 " free memory information" % pnode)
2882 elif instance_info.fail_msg:
2883 self.warn.append("Can't get instance runtime information: %s" %
2884 instance_info.fail_msg)
2885 else:
2886 if instance_info.payload:
2887 current_mem = int(instance_info.payload["memory"])
2888 else:
2889 # Assume instance not running
2890 # (there is a slight race condition here, but it's not very
2891 # probable, and we have no other way to check)
2892 # TODO: Describe race condition
2893 current_mem = 0
2894 #TODO(dynmem): do the appropriate check involving MINMEM
2895 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2896 pnhvinfo["memory_free"])
2897 if miss_mem > 0:
2898 raise errors.OpPrereqError("This change will prevent the instance"
2899 " from starting, due to %d MB of memory"
2900 " missing on its primary node" %
2901 miss_mem, errors.ECODE_NORES)
2903 if be_new[constants.BE_AUTO_BALANCE]:
2904 for node, nres in nodeinfo.items():
2905 if node not in instance.secondary_nodes:
2906 continue
2907 nres.Raise("Can't get info from secondary node %s" % node,
2908 prereq=True, ecode=errors.ECODE_STATE)
2909 (_, _, (nhvinfo, )) = nres.payload
2910 if not isinstance(nhvinfo.get("memory_free", None), int):
2911 raise errors.OpPrereqError("Secondary node %s didn't return free"
2912 " memory information" % node,
2913 errors.ECODE_STATE)
2914 #TODO(dynmem): do the appropriate check involving MINMEM
2915 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2916 raise errors.OpPrereqError("This change will prevent the instance"
2917 " from failover to its secondary node"
2918 " %s, due to not enough memory" % node,
2919 errors.ECODE_STATE)
2921 if self.op.runtime_mem:
2922 remote_info = self.rpc.call_instance_info(instance.primary_node,
2923 instance.name,
2924 instance.hypervisor)
2925 remote_info.Raise("Error checking node %s" % instance.primary_node)
2926 if not remote_info.payload: # not running already
2927 raise errors.OpPrereqError("Instance %s is not running" %
2928 instance.name, errors.ECODE_STATE)
2930 current_memory = remote_info.payload["memory"]
2931 if (not self.op.force and
2932 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2933 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2934 raise errors.OpPrereqError("Instance %s must have between %d"
2935 " and %d MB of memory unless --force is"
2936 " given" %
2937 (instance.name,
2938 self.be_proposed[constants.BE_MINMEM],
2939 self.be_proposed[constants.BE_MAXMEM]),
2940 errors.ECODE_INVAL)
2942 delta = self.op.runtime_mem - current_memory
2943 if delta > 0:
2944 CheckNodeFreeMemory(self, instance.primary_node,
2945 "ballooning memory for instance %s" %
2946 instance.name, delta, instance.hypervisor)
2948 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2949 raise errors.OpPrereqError("Disk operations not supported for"
2950 " diskless instances", errors.ECODE_INVAL)
2952 def _PrepareNicCreate(_, params, private):
2953 self._PrepareNicModification(params, private, None, None,
2954 None, cluster, pnode)
2955 return (None, None)
2957 def _PrepareNicMod(_, nic, params, private):
2958 self._PrepareNicModification(params, private, nic.ip, nic.network,
2959 nic.nicparams, cluster, pnode)
2960 return None
2962 def _PrepareNicRemove(_, params, __):
2963 ip = params.ip
2964 net = params.network
2965 if net is not None and ip is not None:
2966 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2968 # Verify NIC changes (operating on copy)
2969 nics = instance.nics[:]
2970 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2971 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2972 if len(nics) > constants.MAX_NICS:
2973 raise errors.OpPrereqError("Instance has too many network interfaces"
2974 " (%d), cannot add more" % constants.MAX_NICS,
2975 errors.ECODE_STATE)
2977 def _PrepareDiskMod(_, disk, params, __):
2978 disk.name = params.get(constants.IDISK_NAME, None)
2980 # Verify disk changes (operating on a copy)
2981 disks = copy.deepcopy(instance.disks)
2982 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2983 _PrepareDiskMod, None)
2984 utils.ValidateDeviceNames("disk", disks)
2985 if len(disks) > constants.MAX_DISKS:
2986 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2987 " more" % constants.MAX_DISKS,
2988 errors.ECODE_STATE)
2989 disk_sizes = [disk.size for disk in instance.disks]
2990 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2991 self.diskmod if op == constants.DDM_ADD)
2992 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2993 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2995 if self.op.offline is not None and self.op.offline:
2996 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2997 msg="can't change to offline")
2999 # Pre-compute NIC changes (necessary to use result in hooks)
3000 self._nic_chgdesc = []
3001 if self.nicmod:
3002 # Operate on copies as this is still in prereq
3003 nics = [nic.Copy() for nic in instance.nics]
3004 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3005 self._CreateNewNic, self._ApplyNicMods,
3006 self._RemoveNic)
3007 # Verify that NIC names are unique and valid
3008 utils.ValidateDeviceNames("NIC", nics)
3009 self._new_nics = nics
3010 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3011 else:
3012 self._new_nics = None
3013 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
3015 if not self.op.ignore_ipolicy:
3016 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3017 group_info)
3019 # Fill ispec with backend parameters
3020 ispec[constants.ISPEC_SPINDLE_USE] = \
3021 self.be_new.get(constants.BE_SPINDLE_USE, None)
3022 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3023 None)
3025 # Copy ispec to verify parameters with min/max values separately
3026 if self.op.disk_template:
3027 new_disk_template = self.op.disk_template
3029 new_disk_template = instance.disk_template
3030 ispec_max = ispec.copy()
3031 ispec_max[constants.ISPEC_MEM_SIZE] = \
3032 self.be_new.get(constants.BE_MAXMEM, None)
3033 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3034 new_disk_template)
3035 ispec_min = ispec.copy()
3036 ispec_min[constants.ISPEC_MEM_SIZE] = \
3037 self.be_new.get(constants.BE_MINMEM, None)
3038 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3039 new_disk_template)
3041 if (res_max or res_min):
3042 # FIXME: Improve error message by including information about whether
3043 # the upper or lower limit of the parameter fails the ipolicy.
3044 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3045 (group_info, group_info.name,
3046 utils.CommaJoin(set(res_max + res_min))))
3047 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3049 def _ConvertPlainToDrbd(self, feedback_fn):
3050 """Converts an instance from plain to drbd.
3052 """
3053 feedback_fn("Converting template to drbd")
3054 instance = self.instance
3055 pnode = instance.primary_node
3056 snode = self.op.remote_node
3058 assert instance.disk_template == constants.DT_PLAIN
3060 # create a fake disk info for _GenerateDiskTemplate
3061 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3062 constants.IDISK_VG: d.logical_id[0],
3063 constants.IDISK_NAME: d.name}
3064 for d in instance.disks]
3065 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3066 instance.name, pnode, [snode],
3067 disk_info, None, None, 0, feedback_fn,
3068 self.diskparams)
3069 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3070 self.diskparams)
3071 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3072 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3073 info = GetInstanceInfoText(instance)
3074 feedback_fn("Creating additional volumes...")
3075 # first, create the missing data and meta devices
3076 for disk in anno_disks:
3077 # unfortunately this is... not too nice
3078 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3079 info, True, p_excl_stor)
3080 for child in disk.children:
3081 CreateSingleBlockDev(self, snode, instance, child, info, True,
3082 s_excl_stor)
3083 # at this stage, all new LVs have been created, we can rename the
3084 # old ones
3085 feedback_fn("Renaming original volumes...")
3086 rename_list = [(o, n.children[0].logical_id)
3087 for (o, n) in zip(instance.disks, new_disks)]
3088 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3089 result.Raise("Failed to rename original LVs")
3091 feedback_fn("Initializing DRBD devices...")
3092 # all child devices are in place, we can now create the DRBD devices
3093 try:
3094 for disk in anno_disks:
3095 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3096 f_create = node == pnode
3097 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3098 excl_stor)
3099 except errors.GenericError, e:
3100 feedback_fn("Initializing of DRBD devices failed;"
3101 " renaming back original volumes...")
3102 for disk in new_disks:
3103 self.cfg.SetDiskID(disk, pnode)
3104 rename_back_list = [(n.children[0], o.logical_id)
3105 for (n, o) in zip(new_disks, instance.disks)]
3106 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3107 result.Raise("Failed to rename LVs back after error %s" % str(e))
3108 raise
3110 # at this point, the instance has been modified
3111 instance.disk_template = constants.DT_DRBD8
3112 instance.disks = new_disks
3113 self.cfg.Update(instance, feedback_fn)
3115 # Release node locks while waiting for sync
3116 ReleaseLocks(self, locking.LEVEL_NODE)
3118 # disks are created, waiting for sync
3119 disk_abort = not WaitForSync(self, instance,
3120 oneshot=not self.op.wait_for_sync)
3121 if disk_abort:
3122 raise errors.OpExecError("There are some degraded disks for"
3123 " this instance, please cleanup manually")
3125 # Node resource locks will be released by caller
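# Conversion sketch (not part of the original module): for each original LV,
# the code above builds a DRBD8 disk whose children are [data_lv, meta_lv],
# creates the missing volumes, renames the old LV onto the new data LV's
# logical_id and then assembles the DRBD device on both nodes, e.g.
#
#   plain:  disk/0 = LV(xenvg/<uuid>.disk0)           on pnode only
#   drbd:   disk/0 = DRBD8(children=[data LV, meta LV]) on pnode + snode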
3127 def _ConvertDrbdToPlain(self, feedback_fn):
3128 """Converts an instance from drbd to plain.
3130 """
3131 instance = self.instance
3133 assert len(instance.secondary_nodes) == 1
3134 assert instance.disk_template == constants.DT_DRBD8
3136 pnode = instance.primary_node
3137 snode = instance.secondary_nodes[0]
3138 feedback_fn("Converting template to plain")
3140 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3141 new_disks = [d.children[0] for d in instance.disks]
3143 # copy over size, mode and name
3144 for parent, child in zip(old_disks, new_disks):
3145 child.size = parent.size
3146 child.mode = parent.mode
3147 child.name = parent.name
3149 # this is a DRBD disk, return its port to the pool
3150 # NOTE: this must be done right before the call to cfg.Update!
3151 for disk in old_disks:
3152 tcp_port = disk.logical_id[2]
3153 self.cfg.AddTcpUdpPort(tcp_port)
3155 # update instance structure
3156 instance.disks = new_disks
3157 instance.disk_template = constants.DT_PLAIN
3158 _UpdateIvNames(0, instance.disks)
3159 self.cfg.Update(instance, feedback_fn)
3161 # Release locks in case removing disks takes a while
3162 ReleaseLocks(self, locking.LEVEL_NODE)
3164 feedback_fn("Removing volumes on the secondary node...")
3165 for disk in old_disks:
3166 self.cfg.SetDiskID(disk, snode)
3167 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3168 if msg:
3169 self.LogWarning("Could not remove block device %s on node %s,"
3170 " continuing anyway: %s", disk.iv_name, snode, msg)
3172 feedback_fn("Removing unneeded volumes on the primary node...")
3173 for idx, disk in enumerate(old_disks):
3174 meta = disk.children[1]
3175 self.cfg.SetDiskID(meta, pnode)
3176 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3177 if msg:
3178 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3179 " continuing anyway: %s", idx, pnode, msg)
3181 def _HotplugDevice(self, action, dev_type, device, extra, seq):
3182 self.LogInfo("Trying to hotplug device...")
3183 msg = "hotplug:"
3184 result = self.rpc.call_hotplug_device(self.instance.primary_node,
3185 self.instance, action, dev_type,
3186 (device, self.instance),
3187 extra, seq)
3188 if result.fail_msg:
3189 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3190 self.LogInfo("Continuing execution..")
3191 msg += "failed"
3192 else:
3193 self.LogInfo("Hotplug done.")
3194 msg += "done"
3195 return msg
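# Illustrative call (hypothetical arguments): hot-adding the first NIC of the
# instance; "extra" carries device-type specific data (a disk link name, or
# None for NICs) and "seq" is the device index. Sketch, not original code.
#
#   msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
#                             constants.HOTPLUG_TARGET_NIC,
#                             nic, None, 0)
#   # msg is "hotplug:done" or "hotplug:failed" for the change report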
3197 def _CreateNewDisk(self, idx, params, _):
3198 """Creates a new disk.
3200 """
3201 instance = self.instance
3204 if instance.disk_template in constants.DTS_FILEBASED:
3205 (file_driver, file_path) = instance.disks[0].logical_id
3206 file_path = os.path.dirname(file_path)
3207 else:
3208 file_driver = file_path = None
3210 disk = \
3211 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3212 instance.primary_node, instance.secondary_nodes,
3213 [params], file_path, file_driver, idx,
3214 self.Log, self.diskparams)[0]
3216 new_disks = CreateDisks(self, instance, disks=[disk])
3218 if self.cluster.prealloc_wipe_disks:
3220 WipeOrCleanupDisks(self, instance,
3221 disks=[(idx, disk, 0)],
3222 cleanup=new_disks)
3224 changes = [
3225 ("disk/%d" % idx,
3226 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3227 ]
3228 if self.op.hotplug:
3229 self.cfg.SetDiskID(disk, self.instance.primary_node)
3230 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3231 (disk, self.instance),
3232 self.instance.name, True, idx)
3233 if result.fail_msg:
3234 changes.append(("disk/%d" % idx, "assemble:failed"))
3235 self.LogWarning("Can't assemble newly created disk %d: %s",
3236 idx, result.fail_msg)
3237 else:
3238 _, link_name = result.payload
3239 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3240 constants.HOTPLUG_TARGET_DISK,
3241 disk, link_name, idx)
3242 changes.append(("disk/%d" % idx, msg))
3244 return (disk, changes)
3246 def _ModifyDisk(self, idx, disk, params, _):
3247 """Modifies a disk.
3249 """
3250 changes = []
3251 if constants.IDISK_MODE in params:
3252 disk.mode = params.get(constants.IDISK_MODE)
3253 changes.append(("disk.mode/%d" % idx, disk.mode))
3255 if constants.IDISK_NAME in params:
3256 disk.name = params.get(constants.IDISK_NAME)
3257 changes.append(("disk.name/%d" % idx, disk.name))
3259 # Modify arbitrary params in case instance template is ext
3260 for key, value in params.iteritems():
3261 if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3262 self.instance.disk_template == constants.DT_EXT):
3263 # stolen from GetUpdatedParams: default means reset/delete
3264 if value.lower() == constants.VALUE_DEFAULT:
3265 try:
3266 del disk.params[key]
3267 except KeyError:
3268 pass
3269 else:
3270 disk.params[key] = value
3271 changes.append(("disk.params:%s/%d" % (key, idx), value))
3273 return changes
3275 def _RemoveDisk(self, idx, root, _):
3276 """Removes a disk.
3278 """
3279 hotmsg = ""
3280 if self.op.hotplug:
3281 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3282 constants.HOTPLUG_TARGET_DISK,
3283 root, None, idx)
3284 ShutdownInstanceDisks(self, self.instance, [root])
3286 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3287 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3288 self.cfg.SetDiskID(disk, node)
3289 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3290 if msg:
3291 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3292 " continuing anyway", idx, node, msg)
3294 # if this is a DRBD disk, return its port to the pool
3295 if root.dev_type in constants.LDS_DRBD:
3296 self.cfg.AddTcpUdpPort(root.logical_id[2])
3298 return hotmsg
3300 def _CreateNewNic(self, idx, params, private):
3301 """Creates data structure for a new network interface.
3303 """
3304 mac = params[constants.INIC_MAC]
3305 ip = params.get(constants.INIC_IP, None)
3306 net = params.get(constants.INIC_NETWORK, None)
3307 name = params.get(constants.INIC_NAME, None)
3308 net_uuid = self.cfg.LookupNetwork(net)
3309 #TODO: not private.filled?? can a nic have no nicparams??
3310 nicparams = private.filled
3311 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3312 nicparams=nicparams)
3313 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3315 changes = [
3316 ("nic.%d" % idx,
3317 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3318 (mac, ip, private.filled[constants.NIC_MODE],
3319 private.filled[constants.NIC_LINK], net)),
3320 ]
3322 if self.op.hotplug:
3323 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3324 constants.HOTPLUG_TARGET_NIC,
3325 nobj, None, idx)
3326 changes.append(("nic.%d" % idx, msg))
3328 return (nobj, changes)
3330 def _ApplyNicMods(self, idx, nic, params, private):
3331 """Modifies a network interface.
3333 """
3334 changes = []
3336 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3337 if key in params:
3338 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3339 setattr(nic, key, params[key])
3341 new_net = params.get(constants.INIC_NETWORK, nic.network)
3342 new_net_uuid = self.cfg.LookupNetwork(new_net)
3343 if new_net_uuid != nic.network:
3344 changes.append(("nic.network/%d" % idx, new_net))
3345 nic.network = new_net_uuid
3347 if private.filled:
3348 nic.nicparams = private.filled
3350 for (key, val) in nic.nicparams.items():
3351 changes.append(("nic.%s/%d" % (key, idx), val))
3353 if self.op.hotplug:
3354 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3355 constants.HOTPLUG_TARGET_NIC,
3356 nic, None, idx)
3357 changes.append(("nic/%d" % idx, msg))
3359 return changes
3361 def _RemoveNic(self, idx, nic, _):
3362 if self.op.hotplug:
3363 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3364 constants.HOTPLUG_TARGET_NIC,
3365 nic, None, idx)
3367 def Exec(self, feedback_fn):
3368 """Modifies an instance.
3370 All parameters take effect only at the next restart of the instance.
3372 """
3373 # Process here the warnings from CheckPrereq, as we don't have a
3374 # feedback_fn there.
3375 # TODO: Replace with self.LogWarning
3376 for warn in self.warn:
3377 feedback_fn("WARNING: %s" % warn)
3379 assert ((self.op.disk_template is None) ^
3380 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3381 "Not owning any node resource locks"
3383 result = []
3384 instance = self.instance
3386 # New primary node
3387 if self.op.pnode:
3388 instance.primary_node = self.op.pnode
3390 # runtime memory
3391 if self.op.runtime_mem:
3392 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3393 instance,
3394 self.op.runtime_mem)
3395 rpcres.Raise("Cannot modify instance runtime memory")
3396 result.append(("runtime_memory", self.op.runtime_mem))
3398 # Apply disk changes
3399 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3400 self._CreateNewDisk, self._ModifyDisk,
3401 self._RemoveDisk)
3402 _UpdateIvNames(0, instance.disks)
3404 if self.op.disk_template:
3405 if __debug__:
3406 check_nodes = set(instance.all_nodes)
3407 if self.op.remote_node:
3408 check_nodes.add(self.op.remote_node)
3409 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3410 owned = self.owned_locks(level)
3411 assert not (check_nodes - owned), \
3412 ("Not owning the correct locks, owning %r, expected at least %r" %
3413 (owned, check_nodes))
3415 r_shut = ShutdownInstanceDisks(self, instance)
3416 if not r_shut:
3417 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3418 " proceed with disk template conversion")
3419 mode = (instance.disk_template, self.op.disk_template)
3420 try:
3421 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3422 except:
3423 self.cfg.ReleaseDRBDMinors(instance.name)
3424 raise
3425 result.append(("disk_template", self.op.disk_template))
3427 assert instance.disk_template == self.op.disk_template, \
3428 ("Expected disk template '%s', found '%s'" %
3429 (self.op.disk_template, instance.disk_template))
3431 # Release node and resource locks if there are any (they might already have
3432 # been released during disk conversion)
3433 ReleaseLocks(self, locking.LEVEL_NODE)
3434 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3437 if self._new_nics is not None:
3438 instance.nics = self._new_nics
3439 result.extend(self._nic_chgdesc)
3442 if self.op.hvparams:
3443 instance.hvparams = self.hv_inst
3444 for key, val in self.op.hvparams.iteritems():
3445 result.append(("hv/%s" % key, val))
3448 if self.op.beparams:
3449 instance.beparams = self.be_inst
3450 for key, val in self.op.beparams.iteritems():
3451 result.append(("be/%s" % key, val))
3453 # OS change
3454 if self.op.os_name:
3455 instance.os = self.op.os_name
3458 if self.op.osparams:
3459 instance.osparams = self.os_inst
3460 for key, val in self.op.osparams.iteritems():
3461 result.append(("os/%s" % key, val))
3463 if self.op.offline is None:
3464 # Ignore
3465 pass
3466 elif self.op.offline:
3467 # Mark instance as offline
3468 self.cfg.MarkInstanceOffline(instance.name)
3469 result.append(("admin_state", constants.ADMINST_OFFLINE))
3470 else:
3471 # Mark instance as online, but stopped
3472 self.cfg.MarkInstanceDown(instance.name)
3473 result.append(("admin_state", constants.ADMINST_DOWN))
3475 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3477 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3478 self.owned_locks(locking.LEVEL_NODE)), \
3479 "All node locks should have been released by now"
3481 return result
3483 _DISK_CONVERSIONS = {
3484 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3485 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3486 }
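# Only the two conversions registered above are supported: Exec looks up
# self._DISK_CONVERSIONS[(instance.disk_template, self.op.disk_template)],
# and any other pair is rejected in _PreCheckDiskTemplate. For example,
# converting plain -> drbd8 additionally requires remote_node, since DRBD
# needs a secondary. Sketch comment, not original code.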
3489 class LUInstanceChangeGroup(LogicalUnit):
3490 HPATH = "instance-change-group"
3491 HTYPE = constants.HTYPE_INSTANCE
3492 REQ_BGL = False
3494 def ExpandNames(self):
3495 self.share_locks = ShareAll()
3497 self.needed_locks = {
3498 locking.LEVEL_NODEGROUP: [],
3499 locking.LEVEL_NODE: [],
3500 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3501 }
3503 self._ExpandAndLockInstance()
3505 if self.op.target_groups:
3506 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3506 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3507 self.op.target_groups)
3508 else:
3509 self.req_target_uuids = None
3511 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by the instance
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

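  # The group locks above are taken optimistically: the instance's groups are
  # derived from its nodes before those nodes are locked, so CheckPrereq must
  # verify the result again once all locks are held.
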
  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

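  # The hook environment therefore carries the candidate groups as a single
  # space-separated TARGET_GROUPS value, on top of the standard per-instance
  # variables added by BuildInstanceHookEnvByObject.
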
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
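
# Illustrative sketch (hypothetical instance and group names): this LU is
# typically reached via "gnt-instance change-group --to=rack-b inst1", which
# corresponds to submitting
#   op = opcodes.OpInstanceChangeGroup(instance_name="inst1.example.com",
#                                      target_groups=["rack-b"])
# The returned ResultWithJobs wraps the job IDs that perform the actual moves.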