#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callback function
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
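
# Illustrative note for _CheckHostnameSane: requesting "inst1" while the
# resolver returns "inst1.example.com" passes (prefix match); a resolution
# to "other.example.com" raises OpPrereqError instead.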


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)
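
# Note: the beparams passed to _CreateInstanceAllocRequest must already be
# fully filled (see _ComputeFullBeParams below), since BE_SPINDLE_USE,
# BE_VCPUS and BE_MAXMEM are indexed directly.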


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
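
# Illustrative example: an opcode carrying beparams={"maxmem": "auto"} has
# "maxmem" replaced by the cluster default, the dict is upgraded from the
# legacy "memory" layout, type-checked, and then completed with the
# remaining cluster defaults via SimpleFillBE.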


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
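
# Summary of the IP handling in _ComputeNics: ip=None or "none" means no
# address, "auto" uses the default (name-resolved) IP, "pool" defers to
# the network's address pool (resolved later in CheckPrereq), and any
# other value must be a valid literal IP address.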


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
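
# Illustrative example for _CheckOSVariant: for an OS that declares
# supported variants, a name such as "debootstrap+default" selects the
# "default" variant, while a bare "debootstrap" is rejected with "OS name
# must include a variant".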


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration). We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have
      # no choice but to lock all groups, because they're allocated before
      # nodes. This is sad, but true. At least we release all those we don't
      # need in CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    # TODO: Export network to iallocator so that it chooses a pnode in a
    # nodegroup that has the desired network connected to it
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]
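
  # Note on _RunAllocator: with opportunistic locking the allocator only
  # sees nodes whose locks were actually acquired (node_whitelist), and a
  # failed placement raises ECODE_TEMP_NORES so the job can be retried
  # rather than failing permanently.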

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)

    return export_info
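
  # The export is an INI-style file; a minimal sketch with illustrative
  # section names and values:
  #   [export]
  #   version = 0
  #   os = debootstrap+default
  #   [instance]
  #   name = inst1
  #   disk0_size = 1024
  #   nic0_mac = aa:00:00:35:ac:9a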

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]
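
  # _RevertToDefaults is used when op.identify_defaults is set (see
  # CheckPrereq below): parameters whose values match the current cluster
  # defaults are dropped from the opcode, so the new instance keeps
  # tracking the cluster defaults instead of pinning them.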

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
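
  # The resulting path has the form
  #   <cluster-level storage dir>[/<op.file_storage_dir>]/<instance name>
  # where the per-opcode directory component is optional.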

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in CheckArguments)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" %
                                   self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    # TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    # TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)
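
  # End-of-CheckPrereq note: at this point placement (pnode/snode), NICs,
  # disks and all parameter dicts are final; dry_run_result is the node
  # list a dry-run submission of the opcode would report.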

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-and-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise
1283 feedback_fn("adding instance %s to cluster config" % instance)
1285 self.cfg.AddInstance(iobj, self.proc.GetECId())
1287 # Declare that we don't want to remove the instance lock anymore, as we've
1288 # added the instance to the config
1289 del self.remove_locks[locking.LEVEL_INSTANCE]
1291 if self.op.mode == constants.INSTANCE_IMPORT:
1292 # Release unused nodes
1293 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1296 ReleaseLocks(self, locking.LEVEL_NODE)
1299 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1300 feedback_fn("* wiping instance disks...")
1302 WipeDisks(self, iobj)
1303 except errors.OpExecError, err:
1304 logging.exception("Wiping disks failed")
1305 self.LogWarning("Wiping instance disks failed (%s)", err)
1309 # Something is already wrong with the disks, don't do anything else
1311 elif self.op.wait_for_sync:
1312 disk_abort = not WaitForSync(self, iobj)
1313 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1314 # make sure the disks are not degraded (still sync-ing is ok)
1315 feedback_fn("* checking mirrors status")
1316 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1321 RemoveDisks(self, iobj)
1322 self.cfg.RemoveInstance(iobj.name)
1323 # Make sure the instance lock gets removed
1324 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1325 raise errors.OpExecError("There are some degraded disks for"
1328 # instance disks are now active
1329 iobj.disks_active = True
1331 # Release all node resource locks
1332 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1334 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1335 # we need to set the disks ID to the primary node, since the
1336 # preceding code might or might have not done it, depending on
1337 # disk template and other options
1338 for disk in iobj.disks:
1339 self.cfg.SetDiskID(disk, pnode_name)
1340 if self.op.mode == constants.INSTANCE_CREATE:
1341 if not self.op.no_install:
1342 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1343 not self.op.wait_for_sync)
1345 feedback_fn("* pausing disk sync to install instance OS")
1346 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1349 for idx, success in enumerate(result.payload):
1351 logging.warn("pause-sync of instance %s for disk %d failed",
1354 feedback_fn("* running the instance OS create scripts...")
1355 # FIXME: pass debug option from opcode to backend
1357 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1358 self.op.debug_level)
1360 feedback_fn("* resuming disk sync")
1361 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1364 for idx, success in enumerate(result.payload):
1366 logging.warn("resume-sync of instance %s for disk %d failed",
1369 os_add_result.Raise("Could not add os for instance %s"
1370 " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
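
  # Exec returns the instance's node list; all_nodes is the primary node
  # followed by any secondaries.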


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
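
  # Removal sequence: shut the instance down on its primary node (failures
  # are only tolerated when op.ignore_failures is set), then RemoveInstance
  # deletes the disks and the configuration entry.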
1654 class LUInstanceMove(LogicalUnit):
1655 """Move an instance by data-copying.
1658 HPATH = "instance-move"
1659 HTYPE = constants.HTYPE_INSTANCE
1662 def ExpandNames(self):
1663 self._ExpandAndLockInstance()
1664 target_node = ExpandNodeName(self.cfg, self.op.target_node)
1665 self.op.target_node = target_node
1666 self.needed_locks[locking.LEVEL_NODE] = [target_node]
1667 self.needed_locks[locking.LEVEL_NODE_RES] = []
1668 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1670 def DeclareLocks(self, level):
1671 if level == locking.LEVEL_NODE:
1672 self._LockInstancesNodes(primary_only=True)
1673 elif level == locking.LEVEL_NODE_RES:
1675 self.needed_locks[locking.LEVEL_NODE_RES] = \
1676 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1678 def BuildHooksEnv(self):
1681 This runs on master, primary and secondary nodes of the instance.
1685 "TARGET_NODE": self.op.target_node,
1686 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1688 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1691 def BuildHooksNodes(self):
1692 """Build hooks nodes.
1696 self.cfg.GetMasterNode(),
1697 self.instance.primary_node,
1698 self.op.target_node,
1702 def CheckPrereq(self):
1703 """Check prerequisites.
1705 This checks that the instance is in the cluster.
1708 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1709 assert self.instance is not None, \
1710 "Cannot retrieve locked instance %s" % self.op.instance_name
1712 if instance.disk_template not in constants.DTS_COPYABLE:
1713 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1714 instance.disk_template, errors.ECODE_STATE)
1716 node = self.cfg.GetNodeInfo(self.op.target_node)
1717 assert node is not None, \
1718 "Cannot retrieve locked node %s" % self.op.target_node
1720 self.target_node = target_node = node.name
1722 if target_node == instance.primary_node:
1723 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1724 (instance.name, target_node),
1727 bep = self.cfg.GetClusterInfo().FillBE(instance)
1729 for idx, dsk in enumerate(instance.disks):
1730 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1731 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1732 " cannot copy" % idx, errors.ECODE_STATE)
1734 CheckNodeOnline(self, target_node)
1735 CheckNodeNotDrained(self, target_node)
1736 CheckNodeVmCapable(self, target_node)
1737 cluster = self.cfg.GetClusterInfo()
1738 group_info = self.cfg.GetNodeGroup(node.group)
1739 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1740 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1741 ignore=self.op.ignore_ipolicy)
1743 if instance.admin_state == constants.ADMINST_UP:
1744 # check memory requirements on the secondary node
1745 CheckNodeFreeMemory(self, target_node,
1746 "failing over instance %s" %
1747 instance.name, bep[constants.BE_MAXMEM],
1748 instance.hypervisor)
1749 else:
1750 self.LogInfo("Not checking memory on the secondary node as"
1751 " instance will not be started")
1753 # check bridge existence
1754 CheckInstanceBridgesExist(self, instance, node=target_node)
1756 def Exec(self, feedback_fn):
1757 """Move an instance.
1759 The move is done by shutting it down on its present node, copying
1760 the data over (slow) and starting it on the new node.
1763 instance = self.instance
1765 source_node = instance.primary_node
1766 target_node = self.target_node
1768 self.LogInfo("Shutting down instance %s on source node %s",
1769 instance.name, source_node)
1771 assert (self.owned_locks(locking.LEVEL_NODE) ==
1772 self.owned_locks(locking.LEVEL_NODE_RES))
1774 result = self.rpc.call_instance_shutdown(source_node, instance,
1775 self.op.shutdown_timeout,
1776 self.op.reason)
1777 msg = result.fail_msg
1778 if msg:
1779 if self.op.ignore_consistency:
1780 self.LogWarning("Could not shutdown instance %s on node %s."
1781 " Proceeding anyway. Please make sure node"
1782 " %s is down. Error details: %s",
1783 instance.name, source_node, source_node, msg)
1784 else:
1785 raise errors.OpExecError("Could not shutdown instance %s on"
1786 " node %s: %s" %
1787 (instance.name, source_node, msg))
1789 # create the target disks
1790 try:
1791 CreateDisks(self, instance, target_node=target_node)
1792 except errors.OpExecError:
1793 self.LogWarning("Device creation failed")
1794 self.cfg.ReleaseDRBDMinors(instance.name)
1795 raise
1797 cluster_name = self.cfg.GetClusterInfo().cluster_name
1799 errs = []
1800 # activate, get path, copy the data over
1801 for idx, disk in enumerate(instance.disks):
1802 self.LogInfo("Copying data for disk %d", idx)
1803 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1804 instance.name, True, idx)
1805 if result.fail_msg:
1806 self.LogWarning("Can't assemble newly created disk %d: %s",
1807 idx, result.fail_msg)
1808 errs.append(result.fail_msg)
1809 continue
1810 dev_path = result.payload
1811 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1812 target_node, dev_path,
1813 cluster_name)
1814 if result.fail_msg:
1815 self.LogWarning("Can't copy data over for disk %d: %s",
1816 idx, result.fail_msg)
1817 errs.append(result.fail_msg)
1821 self.LogWarning("Some disks failed to copy, aborting")
1823 RemoveDisks(self, instance, target_node=target_node)
1825 self.cfg.ReleaseDRBDMinors(instance.name)
1826 raise errors.OpExecError("Errors during disk copy: %s" %
1829 instance.primary_node = target_node
1830 self.cfg.Update(instance, feedback_fn)
1832 self.LogInfo("Removing the disks on the original node")
1833 RemoveDisks(self, instance, target_node=source_node)
1835 # Only start the instance if it's marked as up
1836 if instance.admin_state == constants.ADMINST_UP:
1837 self.LogInfo("Starting instance %s on node %s",
1838 instance.name, target_node)
1840 disks_ok, _ = AssembleInstanceDisks(self, instance,
1841 ignore_secondaries=True)
1842 if not disks_ok:
1843 ShutdownInstanceDisks(self, instance)
1844 raise errors.OpExecError("Can't activate the instance's disks")
1846 result = self.rpc.call_instance_start(target_node,
1847 (instance, None, None), False,
1848 self.op.reason)
1849 msg = result.fail_msg
1850 if msg:
1851 ShutdownInstanceDisks(self, instance)
1852 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1853 (instance.name, target_node, msg))
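# Editor-added sketch (not part of the original module): the error-collection
# loop used for the disk copies above, reduced to its core. copy_fn is a
# hypothetical callable that returns an error string or None per item.
def _demo_copy_all_or_abort(items, copy_fn, log_warning_fn):
  errs = []
  for idx, item in enumerate(items):
    err = copy_fn(item)
    if err:
      log_warning_fn("Can't copy data for item %d: %s" % (idx, err))
      errs.append(err)
  if errs:
    raise errors.OpExecError("Errors during copy: %s" % ",".join(errs))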
1856 class LUInstanceMultiAlloc(NoHooksLU):
1857 """Allocates multiple instances at the same time.
1859 """
1860 REQ_BGL = False
1862 def CheckArguments(self):
1863 """Check arguments.
1865 """
1866 nodes = []
1867 for inst in self.op.instances:
1868 if inst.iallocator is not None:
1869 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1870 " instance objects", errors.ECODE_INVAL)
1871 nodes.append(bool(inst.pnode))
1872 if inst.disk_template in constants.DTS_INT_MIRROR:
1873 nodes.append(bool(inst.snode))
1875 has_nodes = compat.any(nodes)
1876 if compat.all(nodes) ^ has_nodes:
1877 raise errors.OpPrereqError("There are instance objects providing"
1878 " pnode/snode while others do not",
1881 if not has_nodes and self.op.iallocator is None:
1882 default_iallocator = self.cfg.GetDefaultIAllocator()
1883 if default_iallocator:
1884 self.op.iallocator = default_iallocator
1885 else:
1886 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1887 " given and no cluster-wide default"
1888 " iallocator found; please specify either"
1889 " an iallocator or nodes on the instances"
1890 " or set a cluster-wide default iallocator",
1893 _CheckOpportunisticLocking(self.op)
1895 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1896 if dups:
1897 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1898 utils.CommaJoin(dups), errors.ECODE_INVAL)
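# Editor-added sketch (not part of the original module): a minimal
# equivalent of the utils.FindDuplicates check above, assuming that helper
# returns the values occurring more than once.
def _demo_find_duplicates(names):
  seen = set()
  dups = set()
  for name in names:
    if name in seen:
      dups.add(name)
    seen.add(name)
  return list(dups)
# e.g. _demo_find_duplicates(["a", "b", "a"]) -> ["a"]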
1900 def ExpandNames(self):
1901 """Calculate the locks.
1904 self.share_locks = ShareAll()
1905 self.needed_locks = {
1906 # iallocator will select nodes and even if no iallocator is used,
1907 # collisions with LUInstanceCreate should be avoided
1908 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1909 }
1911 if self.op.iallocator:
1912 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1913 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1915 if self.op.opportunistic_locking:
1916 self.opportunistic_locks[locking.LEVEL_NODE] = True
1917 else:
1918 nodeslist = []
1919 for inst in self.op.instances:
1920 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1921 nodeslist.append(inst.pnode)
1922 if inst.snode is not None:
1923 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1924 nodeslist.append(inst.snode)
1926 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1927 # Lock resources of instance's primary and secondary nodes (copy to
1928 # prevent accidental modification)
1929 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1931 def DeclareLocks(self, level):
1932 if level == locking.LEVEL_NODE_RES and \
1933 self.opportunistic_locks[locking.LEVEL_NODE]:
1934 # Even when using opportunistic locking, we require the same set of
1935 # NODE_RES locks as we got NODE locks
1936 self.needed_locks[locking.LEVEL_NODE_RES] = \
1937 self.owned_locks(locking.LEVEL_NODE)
1939 def CheckPrereq(self):
1940 """Check prerequisite.
1943 if self.op.iallocator:
1944 cluster = self.cfg.GetClusterInfo()
1945 default_vg = self.cfg.GetVGName()
1946 ec_id = self.proc.GetECId()
1948 if self.op.opportunistic_locking:
1949 # Only consider nodes for which a lock is held
1950 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1951 else:
1952 node_whitelist = None
1954 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1955 _ComputeNics(op, cluster, None,
1956 self.cfg, ec_id),
1957 _ComputeFullBeParams(op, cluster),
1958 node_whitelist)
1959 for op in self.op.instances]
1961 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1962 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1964 ial.Run(self.op.iallocator)
1966 if not ial.success:
1967 raise errors.OpPrereqError("Can't compute nodes using"
1968 " iallocator '%s': %s" %
1969 (self.op.iallocator, ial.info),
1970 errors.ECODE_NORES)
1972 self.ia_result = ial.result
1974 if self.op.dry_run:
1975 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1976 constants.JOB_IDS_KEY: [],
1977 })
1979 def _ConstructPartialResult(self):
1980 """Contructs the partial result.
1983 if self.op.iallocator:
1984 (allocatable, failed_insts) = self.ia_result
1985 allocatable_insts = map(compat.fst, allocatable)
1986 else:
1987 allocatable_insts = [op.instance_name for op in self.op.instances]
1988 failed_insts = []
1990 return {
1991 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1992 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1993 }
1995 def Exec(self, feedback_fn):
1996 """Executes the opcode.
2000 if self.op.iallocator:
2001 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2002 (allocatable, failed) = self.ia_result
2004 for (name, nodes) in allocatable:
2005 op = op2inst.pop(name)
2007 if len(nodes) > 1:
2008 (op.pnode, op.snode) = nodes
2009 else:
2010 (op.pnode,) = nodes
2012 jobs.append([op])
2014 missing = set(op2inst.keys()) - set(failed)
2015 assert not missing, \
2016 "Iallocator did return incomplete result: %s" % \
2017 utils.CommaJoin(missing)
2018 else:
2019 jobs.extend([op] for op in self.op.instances)
2021 return ResultWithJobs(jobs, **self._ConstructPartialResult())
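# Editor-added sketch (not part of the original module): shape of the
# iallocator multi-allocation result consumed above. "allocatable" pairs an
# instance name with the chosen node(s); one-node entries only set the
# primary node. op2inst maps instance names to their creation opcodes.
def _demo_apply_multialloc_result(ia_result, op2inst):
  jobs = []
  (allocatable, failed) = ia_result
  for (name, nodes) in allocatable:
    op = op2inst.pop(name)
    if len(nodes) > 1:
      (op.pnode, op.snode) = nodes
    else:
      (op.pnode,) = nodes
    jobs.append([op])
  return (jobs, failed)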
2024 class _InstNicModPrivate:
2025 """Data structure for network interface modifications.
2027 Used by L{LUInstanceSetParams}.
2029 """
2030 def __init__(self):
2031 self.params = None
2032 self.filled = None
2035 def _PrepareContainerMods(mods, private_fn):
2036 """Prepares a list of container modifications by adding a private data field.
2038 @type mods: list of tuples; (operation, index, parameters)
2039 @param mods: List of modifications
2040 @type private_fn: callable or None
2041 @param private_fn: Callable for constructing a private data field for a
2042 modification
2043 @rtype: list
2045 """
2046 if private_fn is None:
2047 fn = lambda: None
2048 else:
2049 fn = private_fn
2051 return [(op, idx, params, fn()) for (op, idx, params) in mods]
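# Editor-added sketch (not part of the original module): what
# _PrepareContainerMods returns. With a private_fn such as
# _InstNicModPrivate every modification gains a fresh private object; with
# None the fourth element is None. The parameter values are made up.
def _demo_prepare_container_mods():
  mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
  prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
  # Each entry now has the shape (op, idx, params, private)
  assert all(len(entry) == 4 for entry in prepared)
  return prepared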
2054 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2055 """Checks if nodes have enough physical CPUs
2057 This function checks if all given nodes have the needed number of
2058 physical CPUs. In case any node has less CPUs or we cannot get the
2059 information from the node, this function raises an OpPrereqError
2060 exception.
2062 @type lu: C{LogicalUnit}
2063 @param lu: a logical unit from which we get configuration data
2064 @type nodenames: C{list}
2065 @param nodenames: the list of node names to check
2066 @type requested: C{int}
2067 @param requested: the minimum acceptable number of physical CPUs
2068 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2069 or we cannot check the node
2071 """
2072 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2073 for node in nodenames:
2074 info = nodeinfo[node]
2075 info.Raise("Cannot get current information from node %s" % node,
2076 prereq=True, ecode=errors.ECODE_ENVIRON)
2077 (_, _, (hv_info, )) = info.payload
2078 num_cpus = hv_info.get("cpu_total", None)
2079 if not isinstance(num_cpus, int):
2080 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2081 " on node %s, result was '%s'" %
2082 (node, num_cpus), errors.ECODE_ENVIRON)
2083 if requested > num_cpus:
2084 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2085 "required" % (node, num_cpus, requested),
2089 def GetItemFromContainer(identifier, kind, container):
2090 """Return the item refered by the identifier.
2092 @type identifier: string
2093 @param identifier: Item index or name or UUID
2095 @param kind: One-word item description
2096 @type container: list
2097 @param container: Container to get the item from
2102 idx = int(identifier)
2105 absidx = len(container) - 1
2107 raise IndexError("Not accepting negative indices other than -1")
2108 elif idx > len(container):
2109 raise IndexError("Got %s index %s, but there are only %s" %
2110 (kind, idx, len(container)))
2113 return (absidx, container[idx])
2117 for idx, item in enumerate(container):
2118 if item.uuid == identifier or item.name == identifier:
2121 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2122 (kind, identifier), errors.ECODE_NOENT)
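# Editor-added sketch (not part of the original module): the three lookup
# styles accepted by GetItemFromContainer. The NIC objects and their
# constructor arguments are illustrative assumptions.
def _demo_get_item_from_container():
  nics = [objects.NIC(name="nic0", mac="aa:00:00:00:00:01"),
          objects.NIC(name="nic1", mac="aa:00:00:00:00:02")]
  assert GetItemFromContainer("0", "NIC", nics) == (0, nics[0])     # by index
  assert GetItemFromContainer("-1", "NIC", nics) == (1, nics[1])    # last item
  assert GetItemFromContainer("nic1", "NIC", nics) == (1, nics[1])  # by name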
2125 def _ApplyContainerMods(kind, container, chgdesc, mods,
2126 create_fn, modify_fn, remove_fn):
2127 """Applies descriptions in C{mods} to C{container}.
2130 @param kind: One-word item description
2131 @type container: list
2132 @param container: Container to modify
2133 @type chgdesc: None or list
2134 @param chgdesc: List of applied changes
2136 @param mods: Modifications as returned by L{_PrepareContainerMods}
2137 @type create_fn: callable
2138 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2139 receives absolute item index, parameters and private data object as added
2140 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2142 @type modify_fn: callable
2143 @param modify_fn: Callback for modifying an existing item
2144 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2145 and private data object as added by L{_PrepareContainerMods}, returns
2147 @type remove_fn: callable
2148 @param remove_fn: Callback on removing item; receives absolute item index,
2149 item and private data object as added by L{_PrepareContainerMods}
2152 for (op, identifier, params, private) in mods:
2153 changes = None
2155 if op == constants.DDM_ADD:
2156 # Calculate where item will be added
2157 # When adding an item, identifier can only be an index
2158 try:
2159 idx = int(identifier)
2160 except ValueError:
2161 raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
2162 " identifier for %s" % constants.DDM_ADD,
2163 errors.ECODE_INVAL)
2164 if idx == -1:
2165 addidx = len(container)
2166 else:
2167 if idx < 0:
2168 raise IndexError("Not accepting negative indices other than -1")
2169 elif idx > len(container):
2170 raise IndexError("Got %s index %s, but there are only %s" %
2171 (kind, idx, len(container)))
2172 addidx = idx
2174 if create_fn is None:
2175 item = params
2176 else:
2177 (item, changes) = create_fn(addidx, params, private)
2179 if idx == -1:
2180 container.append(item)
2181 else:
2183 assert idx <= len(container)
2184 # list.insert does so before the specified index
2185 container.insert(idx, item)
2186 else:
2187 # Retrieve existing item
2188 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2190 if op == constants.DDM_REMOVE:
2191 assert not params
2193 if remove_fn is not None:
2194 remove_fn(absidx, item, private)
2196 changes = [("%s/%s" % (kind, absidx), "remove")]
2198 assert container[absidx] == item
2199 del container[absidx]
2200 elif op == constants.DDM_MODIFY:
2201 if modify_fn is not None:
2202 changes = modify_fn(absidx, item, params, private)
2203 else:
2204 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2206 assert _TApplyContModsCbChanges(changes)
2208 if not (chgdesc is None or changes is None):
2209 chgdesc.extend(changes)
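# Editor-added sketch (not part of the original module): driving
# _PrepareContainerMods/_ApplyContainerMods with trivial callbacks. The
# container holds plain dicts instead of NIC/Disk objects; the change
# descriptions returned by the callback end up in chgdesc.
def _demo_apply_container_mods():
  container = []
  chgdesc = []
  mods = _PrepareContainerMods([(constants.DDM_ADD, -1, {"size": 128})], None)
  create_fn = lambda idx, params, private: (dict(params),
                                            [("demo/%d" % idx, "add")])
  _ApplyContainerMods("demo", container, chgdesc, mods,
                      create_fn, None, None)
  assert container == [{"size": 128}]
  assert chgdesc == [("demo/0", "add")]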
2212 def _UpdateIvNames(base_index, disks):
2213 """Updates the C{iv_name} attribute of disks.
2215 @type disks: list of L{objects.Disk}
2216 @param disks: List of disks
2217 """
2218 for (idx, disk) in enumerate(disks):
2219 disk.iv_name = "disk/%s" % (base_index + idx, )
2222 class LUInstanceSetParams(LogicalUnit):
2223 """Modifies an instances's parameters.
2226 HPATH = "instance-modify"
2227 HTYPE = constants.HTYPE_INSTANCE
2231 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2232 assert ht.TList(mods)
2233 assert not mods or len(mods[0]) in (2, 3)
2235 if mods and len(mods[0]) == 2:
2236 result = []
2238 addremove = 0
2239 for op, params in mods:
2240 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2241 result.append((op, -1, params))
2242 addremove += 1
2244 if addremove > 1:
2245 raise errors.OpPrereqError("Only one %s add or remove operation is"
2246 " supported at a time" % kind,
2247 errors.ECODE_INVAL)
2248 else:
2249 result.append((constants.DDM_MODIFY, op, params))
2251 assert verify_fn(result)
2252 else:
2253 result = mods
2255 return result
2257 @staticmethod
2258 def _CheckMods(kind, mods, key_types, item_fn):
2259 """Ensures requested disk/NIC modifications are valid.
2262 for (op, _, params) in mods:
2263 assert ht.TDict(params)
2265 # If 'key_types' is an empty dict, we assume we have an
2266 # 'ext' template and thus do not ForceDictType
2268 utils.ForceDictType(params, key_types)
2270 if op == constants.DDM_REMOVE:
2272 raise errors.OpPrereqError("No settings should be passed when"
2273 " removing a %s" % kind,
2275 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2278 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2280 def _VerifyDiskModification(self, op, params):
2281 """Verifies a disk modification.
2284 if op == constants.DDM_ADD:
2285 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2286 if mode not in constants.DISK_ACCESS_SET:
2287 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2290 size = params.get(constants.IDISK_SIZE, None)
2292 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2293 constants.IDISK_SIZE, errors.ECODE_INVAL)
2297 except (TypeError, ValueError), err:
2298 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2301 params[constants.IDISK_SIZE] = size
2302 name = params.get(constants.IDISK_NAME, None)
2303 if name is not None and name.lower() == constants.VALUE_NONE:
2304 params[constants.IDISK_NAME] = None
2306 elif op == constants.DDM_MODIFY:
2307 if constants.IDISK_SIZE in params:
2308 raise errors.OpPrereqError("Disk size change not possible, use"
2309 " grow-disk", errors.ECODE_INVAL)
2311 # Disk modification supports changing only the disk name and mode.
2312 # Changing arbitrary parameters is allowed only for the ext disk template
2313 if self.instance.disk_template != constants.DT_EXT:
2314 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2316 name = params.get(constants.IDISK_NAME, None)
2317 if name is not None and name.lower() == constants.VALUE_NONE:
2318 params[constants.IDISK_NAME] = None
2320 @staticmethod
2321 def _VerifyNicModification(op, params):
2322 """Verifies a network interface modification.
2324 """
2325 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2326 ip = params.get(constants.INIC_IP, None)
2327 name = params.get(constants.INIC_NAME, None)
2328 req_net = params.get(constants.INIC_NETWORK, None)
2329 link = params.get(constants.NIC_LINK, None)
2330 mode = params.get(constants.NIC_MODE, None)
2331 if name is not None and name.lower() == constants.VALUE_NONE:
2332 params[constants.INIC_NAME] = None
2333 if req_net is not None:
2334 if req_net.lower() == constants.VALUE_NONE:
2335 params[constants.INIC_NETWORK] = None
2336 req_net = None
2337 elif link is not None or mode is not None:
2338 raise errors.OpPrereqError("If a network is given, mode or link"
2339 " should not be set",
2340 errors.ECODE_INVAL)
2342 if op == constants.DDM_ADD:
2343 macaddr = params.get(constants.INIC_MAC, None)
2344 if macaddr is None:
2345 params[constants.INIC_MAC] = constants.VALUE_AUTO
2347 if ip is not None:
2348 if ip.lower() == constants.VALUE_NONE:
2349 params[constants.INIC_IP] = None
2350 else:
2351 if ip.lower() == constants.NIC_IP_POOL:
2352 if op == constants.DDM_ADD and req_net is None:
2353 raise errors.OpPrereqError("If ip=pool, parameter network"
2354 " cannot be none",
2355 errors.ECODE_INVAL)
2356 else:
2357 if not netutils.IPAddress.IsValid(ip):
2358 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2359 errors.ECODE_INVAL)
2361 if constants.INIC_MAC in params:
2362 macaddr = params[constants.INIC_MAC]
2363 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2364 macaddr = utils.NormalizeAndValidateMac(macaddr)
2366 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2367 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2368 " modifying an existing NIC",
2371 def CheckArguments(self):
2372 if not (self.op.nics or self.op.disks or self.op.disk_template or
2373 self.op.hvparams or self.op.beparams or self.op.os_name or
2374 self.op.osparams or self.op.offline is not None or
2375 self.op.runtime_mem or self.op.pnode):
2376 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2378 if self.op.hvparams:
2379 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2380 "hypervisor", "instance", "cluster")
2382 self.op.disks = self._UpgradeDiskNicMods(
2383 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2384 self.op.nics = self._UpgradeDiskNicMods(
2385 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2387 if self.op.disks and self.op.disk_template is not None:
2388 raise errors.OpPrereqError("Disk template conversion and other disk"
2389 " changes not supported at the same time",
2392 if (self.op.disk_template and
2393 self.op.disk_template in constants.DTS_INT_MIRROR and
2394 self.op.remote_node is None):
2395 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2396 " one requires specifying a secondary node",
2399 # Check NIC modifications
2400 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2401 self._VerifyNicModification)
2403 if self.op.pnode:
2404 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2406 def ExpandNames(self):
2407 self._ExpandAndLockInstance()
2408 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2409 # Can't even acquire node locks in shared mode as upcoming changes in
2410 # Ganeti 2.6 will start to modify the node object on disk conversion
2411 self.needed_locks[locking.LEVEL_NODE] = []
2412 self.needed_locks[locking.LEVEL_NODE_RES] = []
2413 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2414 # Lock the node group to look up the ipolicy
2415 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2417 def DeclareLocks(self, level):
2418 if level == locking.LEVEL_NODEGROUP:
2419 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2420 # Acquire locks for the instance's nodegroups optimistically. Needs
2421 # to be verified in CheckPrereq
2422 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2423 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2424 elif level == locking.LEVEL_NODE:
2425 self._LockInstancesNodes()
2426 if self.op.disk_template and self.op.remote_node:
2427 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2428 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2429 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2431 self.needed_locks[locking.LEVEL_NODE_RES] = \
2432 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2434 def BuildHooksEnv(self):
2435 """Build hooks env.
2437 This runs on the master, primary and secondaries.
2439 """
2440 args = {}
2441 if constants.BE_MINMEM in self.be_new:
2442 args["minmem"] = self.be_new[constants.BE_MINMEM]
2443 if constants.BE_MAXMEM in self.be_new:
2444 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2445 if constants.BE_VCPUS in self.be_new:
2446 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2447 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2448 # information at all.
2450 if self._new_nics is not None:
2451 nics = []
2453 for nic in self._new_nics:
2454 n = copy.deepcopy(nic)
2455 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2456 n.nicparams = nicparams
2457 nics.append(NICToTuple(self, n))
2458 else:
2459 nics = NICListToTuple(self, self.instance.nics)
2460 args["nics"] = nics
2461 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2462 if self.op.disk_template:
2463 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2464 if self.op.runtime_mem:
2465 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2469 def BuildHooksNodes(self):
2470 """Build hooks nodes.
2473 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2476 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2477 old_params, cluster, pnode):
2479 update_params_dict = dict([(key, params[key])
2480 for key in constants.NICS_PARAMETERS
2481 if key in params])
2483 req_link = update_params_dict.get(constants.NIC_LINK, None)
2484 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2486 new_net_uuid = None
2487 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2488 if new_net_uuid_or_name:
2489 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2490 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2492 if old_net_uuid:
2493 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2495 if new_net_uuid:
2496 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2497 if not netparams:
2498 raise errors.OpPrereqError("No netparams found for the network"
2499 " %s, probably not connected" %
2500 new_net_obj.name, errors.ECODE_INVAL)
2501 new_params = dict(netparams)
2502 else:
2503 new_params = GetUpdatedParams(old_params, update_params_dict)
2505 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2507 new_filled_params = cluster.SimpleFillNIC(new_params)
2508 objects.NIC.CheckParameterSyntax(new_filled_params)
2510 new_mode = new_filled_params[constants.NIC_MODE]
2511 if new_mode == constants.NIC_MODE_BRIDGED:
2512 bridge = new_filled_params[constants.NIC_LINK]
2513 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2514 if msg:
2515 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2516 if self.op.force:
2517 self.warn.append(msg)
2518 else:
2519 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2521 elif new_mode == constants.NIC_MODE_ROUTED:
2522 ip = params.get(constants.INIC_IP, old_ip)
2523 if ip is None:
2524 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2525 " on a routed NIC", errors.ECODE_INVAL)
2527 elif new_mode == constants.NIC_MODE_OVS:
2528 # TODO: check OVS link
2529 self.LogInfo("OVS links are currently not checked for correctness")
2531 if constants.INIC_MAC in params:
2532 mac = params[constants.INIC_MAC]
2533 if mac is None:
2534 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2535 errors.ECODE_INVAL)
2536 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2537 # otherwise generate the MAC address
2538 params[constants.INIC_MAC] = \
2539 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2540 else:
2541 # or validate/reserve the current one
2542 try:
2543 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2544 except errors.ReservationError:
2545 raise errors.OpPrereqError("MAC address '%s' already in use"
2546 " in cluster" % mac,
2547 errors.ECODE_NOTUNIQUE)
2548 elif new_net_uuid != old_net_uuid:
2550 def get_net_prefix(net_uuid):
2551 mac_prefix = None
2552 if net_uuid:
2553 nobj = self.cfg.GetNetwork(net_uuid)
2554 mac_prefix = nobj.mac_prefix
2556 return mac_prefix
2558 new_prefix = get_net_prefix(new_net_uuid)
2559 old_prefix = get_net_prefix(old_net_uuid)
2560 if old_prefix != new_prefix:
2561 params[constants.INIC_MAC] = \
2562 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2564 # if there is a change in (ip, network) tuple
2565 new_ip = params.get(constants.INIC_IP, old_ip)
2566 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2567 if new_ip:
2568 # if IP is pool then require a network and generate one IP
2569 if new_ip.lower() == constants.NIC_IP_POOL:
2570 if new_net_uuid:
2571 try:
2572 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2573 except errors.ReservationError:
2574 raise errors.OpPrereqError("Unable to get a free IP"
2575 " from the address pool",
2576 errors.ECODE_NORES)
2577 self.LogInfo("Chose IP %s from network %s",
2578 new_ip,
2579 new_net_obj.name)
2580 params[constants.INIC_IP] = new_ip
2581 else:
2582 raise errors.OpPrereqError("ip=pool, but no network found",
2583 errors.ECODE_INVAL)
2584 # Reserve new IP if in the new network if any
2585 elif new_net_uuid:
2586 try:
2587 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2588 self.LogInfo("Reserving IP %s in network %s",
2589 new_ip, new_net_obj.name)
2590 except errors.ReservationError:
2591 raise errors.OpPrereqError("IP %s not available in network %s" %
2592 (new_ip, new_net_obj.name),
2593 errors.ECODE_NOTUNIQUE)
2594 # new network is None so check if new IP is a conflicting IP
2595 elif self.op.conflicts_check:
2596 _CheckForConflictingIp(self, new_ip, pnode)
2598 # release old IP if old network is not None
2599 if old_ip and old_net_uuid:
2600 try:
2601 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2602 except errors.AddressPoolError:
2603 logging.warning("Release IP %s not contained in network %s",
2604 old_ip, old_net_obj.name)
2606 # there are no changes in (ip, network) tuple and old network is not None
2607 elif (old_net_uuid is not None and
2608 (req_link is not None or req_mode is not None)):
2609 raise errors.OpPrereqError("Not allowed to change link or mode of"
2610 " a NIC that is connected to a network",
2613 private.params = new_params
2614 private.filled = new_filled_params
2616 def _PreCheckDiskTemplate(self, pnode_info):
2617 """CheckPrereq checks related to a new disk template."""
2618 # Arguments are passed to avoid configuration lookups
2619 instance = self.instance
2620 pnode = instance.primary_node
2621 cluster = self.cluster
2622 if instance.disk_template == self.op.disk_template:
2623 raise errors.OpPrereqError("Instance already has disk template %s" %
2624 instance.disk_template, errors.ECODE_INVAL)
2626 if (instance.disk_template,
2627 self.op.disk_template) not in self._DISK_CONVERSIONS:
2628 raise errors.OpPrereqError("Unsupported disk template conversion from"
2629 " %s to %s" % (instance.disk_template,
2630 self.op.disk_template),
2631 errors.ECODE_INVAL)
2632 CheckInstanceState(self, instance, INSTANCE_DOWN,
2633 msg="cannot change disk template")
2634 if self.op.disk_template in constants.DTS_INT_MIRROR:
2635 if self.op.remote_node == pnode:
2636 raise errors.OpPrereqError("Given new secondary node %s is the same"
2637 " as the primary node of the instance" %
2638 self.op.remote_node, errors.ECODE_STATE)
2639 CheckNodeOnline(self, self.op.remote_node)
2640 CheckNodeNotDrained(self, self.op.remote_node)
2641 # FIXME: here we assume that the old instance type is DT_PLAIN
2642 assert instance.disk_template == constants.DT_PLAIN
2643 disks = [{constants.IDISK_SIZE: d.size,
2644 constants.IDISK_VG: d.logical_id[0]}
2645 for d in instance.disks]
2646 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2647 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2649 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2650 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2651 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2652 snode_group)
2653 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2654 ignore=self.op.ignore_ipolicy)
2655 if pnode_info.group != snode_info.group:
2656 self.LogWarning("The primary and secondary nodes are in two"
2657 " different node groups; the disk parameters"
2658 " from the first disk's node group will be"
2661 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2662 # Make sure none of the nodes require exclusive storage
2663 nodes = [pnode_info]
2664 if self.op.disk_template in constants.DTS_INT_MIRROR:
2665 assert snode_info
2666 nodes.append(snode_info)
2667 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2668 if compat.any(map(has_es, nodes)):
2669 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2670 " storage is enabled" % (instance.disk_template,
2671 self.op.disk_template))
2672 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2674 def CheckPrereq(self):
2675 """Check prerequisites.
2677 This only checks the instance list against the existing names.
2680 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2681 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2683 cluster = self.cluster = self.cfg.GetClusterInfo()
2684 assert self.instance is not None, \
2685 "Cannot retrieve locked instance %s" % self.op.instance_name
2687 pnode = instance.primary_node
2689 self.warn = []
2691 if (self.op.pnode is not None and self.op.pnode != pnode and
2692 not self.op.force):
2693 # verify that the instance is not up
2694 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2695 instance.hypervisor)
2696 if instance_info.fail_msg:
2697 self.warn.append("Can't get instance runtime information: %s" %
2698 instance_info.fail_msg)
2699 elif instance_info.payload:
2700 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2703 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2704 nodelist = list(instance.all_nodes)
2705 pnode_info = self.cfg.GetNodeInfo(pnode)
2706 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2708 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2709 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2710 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2712 # dictionary with instance information after the modification
2713 ispec = {}
2715 # Check disk modifications. This is done here and not in CheckArguments
2716 # (as with NICs), because we need to know the instance's disk template
2717 if instance.disk_template == constants.DT_EXT:
2718 self._CheckMods("disk", self.op.disks, {},
2719 self._VerifyDiskModification)
2720 else:
2721 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2722 self._VerifyDiskModification)
2724 # Prepare disk/NIC modifications
2725 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2726 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2728 # Check the validity of the `provider' parameter
2729 if instance.disk_template == constants.DT_EXT:
2730 for mod in self.diskmod:
2731 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2732 if mod[0] == constants.DDM_ADD:
2733 if ext_provider is None:
2734 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2735 " '%s' missing, during disk add" %
2737 constants.IDISK_PROVIDER),
2739 elif mod[0] == constants.DDM_MODIFY:
2741 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2743 constants.IDISK_PROVIDER,
2746 for mod in self.diskmod:
2747 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2748 if ext_provider is not None:
2749 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2750 " instances of type '%s'" %
2751 (constants.IDISK_PROVIDER,
2752 constants.DT_EXT),
2753 errors.ECODE_INVAL)
2756 if self.op.os_name and not self.op.force:
2757 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2758 self.op.force_variant)
2759 instance_os = self.op.os_name
2760 else:
2761 instance_os = instance.os
2763 assert not (self.op.disk_template and self.op.disks), \
2764 "Can't modify disk template and apply disk changes at the same time"
2766 if self.op.disk_template:
2767 self._PreCheckDiskTemplate(pnode_info)
2769 # hvparams processing
2770 if self.op.hvparams:
2771 hv_type = instance.hypervisor
2772 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2773 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2774 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2777 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2778 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2779 self.hv_proposed = self.hv_new = hv_new # the new actual values
2780 self.hv_inst = i_hvdict # the new dict (without defaults)
2781 else:
2782 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2783 instance.hvparams)
2784 self.hv_new = self.hv_inst = {}
2786 # beparams processing
2787 if self.op.beparams:
2788 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2789 use_none=True)
2790 objects.UpgradeBeParams(i_bedict)
2791 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2792 be_new = cluster.SimpleFillBE(i_bedict)
2793 self.be_proposed = self.be_new = be_new # the new actual values
2794 self.be_inst = i_bedict # the new dict (without defaults)
2795 else:
2796 self.be_new = self.be_inst = {}
2797 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2798 be_old = cluster.FillBE(instance)
2800 # CPU param validation -- checking every time a parameter is
2801 # changed to cover all cases where either CPU mask or vcpus have
2802 # changed
2803 if (constants.BE_VCPUS in self.be_proposed and
2804 constants.HV_CPU_MASK in self.hv_proposed):
2805 cpu_list = \
2806 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2807 # Verify mask is consistent with number of vCPUs. Can skip this
2808 # test if only 1 entry in the CPU mask, which means same mask
2809 # is applied to all vCPUs.
2810 if (len(cpu_list) > 1 and
2811 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2812 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2814 (self.be_proposed[constants.BE_VCPUS],
2815 self.hv_proposed[constants.HV_CPU_MASK]),
2818 # Only perform this test if a new CPU mask is given
2819 if constants.HV_CPU_MASK in self.hv_new:
2820 # Calculate the largest CPU number requested
2821 max_requested_cpu = max(map(max, cpu_list))
2822 # Check that all of the instance's nodes have enough physical CPUs to
2823 # satisfy the requested CPU mask
2824 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2825 max_requested_cpu + 1, instance.hypervisor)
2827 # osparams processing
2828 if self.op.osparams:
2829 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2830 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2831 self.os_inst = i_osdict # the new dict (without defaults)
2832 else:
2833 self.os_inst = {}
2835 #TODO(dynmem): do the appropriate check involving MINMEM
2836 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2837 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2838 mem_check_list = [pnode]
2839 if be_new[constants.BE_AUTO_BALANCE]:
2840 # either we changed auto_balance to yes or it was from before
2841 mem_check_list.extend(instance.secondary_nodes)
2842 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2843 instance.hypervisor)
2844 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2845 [instance.hypervisor], False)
2846 pninfo = nodeinfo[pnode]
2847 msg = pninfo.fail_msg
2848 if msg:
2849 # Assume the primary node is unreachable and go ahead
2850 self.warn.append("Can't get info from primary node %s: %s" %
2851 (pnode, msg))
2852 else:
2853 (_, _, (pnhvinfo, )) = pninfo.payload
2854 if not isinstance(pnhvinfo.get("memory_free", None), int):
2855 self.warn.append("Node data from primary node %s doesn't contain"
2856 " free memory information" % pnode)
2857 elif instance_info.fail_msg:
2858 self.warn.append("Can't get instance runtime information: %s" %
2859 instance_info.fail_msg)
2860 else:
2861 if instance_info.payload:
2862 current_mem = int(instance_info.payload["memory"])
2863 else:
2864 # Assume instance not running
2865 # (there is a slight race condition here, but it's not very
2866 # probable, and we have no other way to check)
2867 # TODO: Describe race condition
2868 current_mem = 0
2869 #TODO(dynmem): do the appropriate check involving MINMEM
2870 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2871 pnhvinfo["memory_free"])
2873 raise errors.OpPrereqError("This change will prevent the instance"
2874 " from starting, due to %d MB of memory"
2875 " missing on its primary node" %
2876 miss_mem, errors.ECODE_NORES)
2878 if be_new[constants.BE_AUTO_BALANCE]:
2879 for node, nres in nodeinfo.items():
2880 if node not in instance.secondary_nodes:
2881 continue
2882 nres.Raise("Can't get info from secondary node %s" % node,
2883 prereq=True, ecode=errors.ECODE_STATE)
2884 (_, _, (nhvinfo, )) = nres.payload
2885 if not isinstance(nhvinfo.get("memory_free", None), int):
2886 raise errors.OpPrereqError("Secondary node %s didn't return free"
2887 " memory information" % node,
2889 #TODO(dynmem): do the appropriate check involving MINMEM
2890 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2891 raise errors.OpPrereqError("This change will prevent the instance"
2892 " from failover to its secondary node"
2893 " %s, due to not enough memory" % node,
2896 if self.op.runtime_mem:
2897 remote_info = self.rpc.call_instance_info(instance.primary_node,
2898 instance.name,
2899 instance.hypervisor)
2900 remote_info.Raise("Error checking node %s" % instance.primary_node)
2901 if not remote_info.payload: # not running already
2902 raise errors.OpPrereqError("Instance %s is not running" %
2903 instance.name, errors.ECODE_STATE)
2905 current_memory = remote_info.payload["memory"]
2906 if (not self.op.force and
2907 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2908 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2909 raise errors.OpPrereqError("Instance %s must have memory between %d"
2910 " and %d MB of memory unless --force is"
2913 self.be_proposed[constants.BE_MINMEM],
2914 self.be_proposed[constants.BE_MAXMEM]),
2917 delta = self.op.runtime_mem - current_memory
2918 if delta > 0:
2919 CheckNodeFreeMemory(self, instance.primary_node,
2920 "ballooning memory for instance %s" %
2921 instance.name, delta, instance.hypervisor)
2923 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2924 raise errors.OpPrereqError("Disk operations not supported for"
2925 " diskless instances", errors.ECODE_INVAL)
2927 def _PrepareNicCreate(_, params, private):
2928 self._PrepareNicModification(params, private, None, None,
2929 {}, cluster, pnode)
2930 return (None, None)
2932 def _PrepareNicMod(_, nic, params, private):
2933 self._PrepareNicModification(params, private, nic.ip, nic.network,
2934 nic.nicparams, cluster, pnode)
2935 return None
2937 def _PrepareNicRemove(_, params, __):
2938 ip = params.ip
2939 net = params.network
2940 if net is not None and ip is not None:
2941 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2943 # Verify NIC changes (operating on copy)
2944 nics = instance.nics[:]
2945 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2946 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2947 if len(nics) > constants.MAX_NICS:
2948 raise errors.OpPrereqError("Instance has too many network interfaces"
2949 " (%d), cannot add more" % constants.MAX_NICS,
2952 def _PrepareDiskMod(_, disk, params, __):
2953 disk.name = params.get(constants.IDISK_NAME, None)
2955 # Verify disk changes (operating on a copy)
2956 disks = copy.deepcopy(instance.disks)
2957 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2958 _PrepareDiskMod, None)
2959 utils.ValidateDeviceNames("disk", disks)
2960 if len(disks) > constants.MAX_DISKS:
2961 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2962 " more" % constants.MAX_DISKS,
2964 disk_sizes = [disk.size for disk in instance.disks]
2965 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2966 self.diskmod if op == constants.DDM_ADD)
2967 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2968 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2970 if self.op.offline is not None and self.op.offline:
2971 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2972 msg="can't change to offline")
2974 # Pre-compute NIC changes (necessary to use result in hooks)
2975 self._nic_chgdesc = []
2976 if self.nicmod:
2977 # Operate on copies as this is still in prereq
2978 nics = [nic.Copy() for nic in instance.nics]
2979 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2980 self._CreateNewNic, self._ApplyNicMods, None)
2981 # Verify that NIC names are unique and valid
2982 utils.ValidateDeviceNames("NIC", nics)
2983 self._new_nics = nics
2984 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2985 else:
2986 self._new_nics = None
2987 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2989 if not self.op.ignore_ipolicy:
2990 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2991 group_info)
2993 # Fill ispec with backend parameters
2994 ispec[constants.ISPEC_SPINDLE_USE] = \
2995 self.be_new.get(constants.BE_SPINDLE_USE, None)
2996 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2997 None)
2999 # Copy ispec to verify parameters with min/max values separately
3000 if self.op.disk_template:
3001 new_disk_template = self.op.disk_template
3002 else:
3003 new_disk_template = instance.disk_template
3004 ispec_max = ispec.copy()
3005 ispec_max[constants.ISPEC_MEM_SIZE] = \
3006 self.be_new.get(constants.BE_MAXMEM, None)
3007 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3008 new_disk_template)
3009 ispec_min = ispec.copy()
3010 ispec_min[constants.ISPEC_MEM_SIZE] = \
3011 self.be_new.get(constants.BE_MINMEM, None)
3012 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3013 new_disk_template)
3015 if (res_max or res_min):
3016 # FIXME: Improve error message by including information about whether
3017 # the upper or lower limit of the parameter fails the ipolicy.
3018 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3019 (group_info, group_info.name,
3020 utils.CommaJoin(set(res_max + res_min))))
3021 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3023 def _ConvertPlainToDrbd(self, feedback_fn):
3024 """Converts an instance from plain to drbd.
3027 feedback_fn("Converting template to drbd")
3028 instance = self.instance
3029 pnode = instance.primary_node
3030 snode = self.op.remote_node
3032 assert instance.disk_template == constants.DT_PLAIN
3034 # create a fake disk info for _GenerateDiskTemplate
3035 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3036 constants.IDISK_VG: d.logical_id[0],
3037 constants.IDISK_NAME: d.name}
3038 for d in instance.disks]
3039 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3040 instance.name, pnode, [snode],
3041 disk_info, None, None, 0, feedback_fn,
3042 self.diskparams)
3043 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3044 self.diskparams)
3045 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3046 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3047 info = GetInstanceInfoText(instance)
3048 feedback_fn("Creating additional volumes...")
3049 # first, create the missing data and meta devices
3050 for disk in anno_disks:
3051 # unfortunately this is... not too nice
3052 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3053 info, True, p_excl_stor)
3054 for child in disk.children:
3055 CreateSingleBlockDev(self, snode, instance, child, info, True,
3056 s_excl_stor)
3057 # at this stage, all new LVs have been created, we can rename the
3058 # old ones
3059 feedback_fn("Renaming original volumes...")
3060 rename_list = [(o, n.children[0].logical_id)
3061 for (o, n) in zip(instance.disks, new_disks)]
3062 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3063 result.Raise("Failed to rename original LVs")
3065 feedback_fn("Initializing DRBD devices...")
3066 # all child devices are in place, we can now create the DRBD devices
3067 try:
3068 for disk in anno_disks:
3069 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3070 f_create = node == pnode
3071 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3072 excl_stor)
3073 except errors.GenericError, e:
3074 feedback_fn("Initializing of DRBD devices failed;"
3075 " renaming back original volumes...")
3076 for disk in new_disks:
3077 self.cfg.SetDiskID(disk, pnode)
3078 rename_back_list = [(n.children[0], o.logical_id)
3079 for (n, o) in zip(new_disks, instance.disks)]
3080 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3081 result.Raise("Failed to rename LVs back after error %s" % str(e))
3084 # at this point, the instance has been modified
3085 instance.disk_template = constants.DT_DRBD8
3086 instance.disks = new_disks
3087 self.cfg.Update(instance, feedback_fn)
3089 # Release node locks while waiting for sync
3090 ReleaseLocks(self, locking.LEVEL_NODE)
3092 # disks are created, waiting for sync
3093 disk_abort = not WaitForSync(self, instance,
3094 oneshot=not self.op.wait_for_sync)
3095 if disk_abort:
3096 raise errors.OpExecError("There are some degraded disks for"
3097 " this instance, please cleanup manually")
3099 # Node resource locks will be released by caller
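# Editor-added sketch (not part of the original module): the
# rename-then-roll-back pattern used by the plain-to-drbd conversion above.
# rename_fn and create_fn are hypothetical stand-ins for the blockdev RPCs;
# pairs is a list of (old_id, new_id) logical IDs.
def _demo_rename_with_rollback(rename_fn, create_fn, pairs, feedback_fn):
  rename_fn(pairs)  # move the original LVs out of the way
  try:
    create_fn()     # build the DRBD devices on top of the renamed LVs
  except errors.GenericError:
    feedback_fn("Initialization failed; renaming back original volumes")
    rename_fn([(new, old) for (old, new) in pairs])
    raise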
3101 def _ConvertDrbdToPlain(self, feedback_fn):
3102 """Converts an instance from drbd to plain.
3105 instance = self.instance
3107 assert len(instance.secondary_nodes) == 1
3108 assert instance.disk_template == constants.DT_DRBD8
3110 pnode = instance.primary_node
3111 snode = instance.secondary_nodes[0]
3112 feedback_fn("Converting template to plain")
3114 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3115 new_disks = [d.children[0] for d in instance.disks]
3117 # copy over size, mode and name
3118 for parent, child in zip(old_disks, new_disks):
3119 child.size = parent.size
3120 child.mode = parent.mode
3121 child.name = parent.name
3123 # this is a DRBD disk, return its port to the pool
3124 # NOTE: this must be done right before the call to cfg.Update!
3125 for disk in old_disks:
3126 tcp_port = disk.logical_id[2]
3127 self.cfg.AddTcpUdpPort(tcp_port)
3129 # update instance structure
3130 instance.disks = new_disks
3131 instance.disk_template = constants.DT_PLAIN
3132 _UpdateIvNames(0, instance.disks)
3133 self.cfg.Update(instance, feedback_fn)
3135 # Release locks in case removing disks takes a while
3136 ReleaseLocks(self, locking.LEVEL_NODE)
3138 feedback_fn("Removing volumes on the secondary node...")
3139 for disk in old_disks:
3140 self.cfg.SetDiskID(disk, snode)
3141 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3142 if msg:
3143 self.LogWarning("Could not remove block device %s on node %s,"
3144 " continuing anyway: %s", disk.iv_name, snode, msg)
3146 feedback_fn("Removing unneeded volumes on the primary node...")
3147 for idx, disk in enumerate(old_disks):
3148 meta = disk.children[1]
3149 self.cfg.SetDiskID(meta, pnode)
3150 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3151 if msg:
3152 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3153 " continuing anyway: %s", idx, pnode, msg)
3155 def _CreateNewDisk(self, idx, params, _):
3156 """Creates a new disk.
3159 instance = self.instance
3162 if instance.disk_template in constants.DTS_FILEBASED:
3163 (file_driver, file_path) = instance.disks[0].logical_id
3164 file_path = os.path.dirname(file_path)
3166 file_driver = file_path = None
3169 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3170 instance.primary_node, instance.secondary_nodes,
3171 [params], file_path, file_driver, idx,
3172 self.Log, self.diskparams)[0]
3174 new_disks = CreateDisks(self, instance, disks=[disk])
3176 if self.cluster.prealloc_wipe_disks:
3177 # Wipe new disk
3178 WipeOrCleanupDisks(self, instance,
3179 disks=[(idx, disk, 0)],
3180 cleanup=new_disks)
3182 return (disk, [
3183 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3184 ])
3186 def _ModifyDisk(self, idx, disk, params, _):
3187 """Modifies a disk.
3189 """
3190 changes = []
3191 if constants.IDISK_MODE in params:
3192 disk.mode = params.get(constants.IDISK_MODE)
3193 changes.append(("disk.mode/%d" % idx, disk.mode))
3195 if constants.IDISK_NAME in params:
3196 disk.name = params.get(constants.IDISK_NAME)
3197 changes.append(("disk.name/%d" % idx, disk.name))
3199 # Modify arbitrary params in case instance template is ext
3200 for key, value in params.iteritems():
3201 if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3202 self.instance.disk_template == constants.DT_EXT):
3203 # stolen from GetUpdatedParams: default means reset/delete
3204 if value.lower() == constants.VALUE_DEFAULT:
3205 try:
3206 del disk.params[key]
3207 except KeyError:
3208 pass
3209 else:
3210 disk.params[key] = value
3211 changes.append(("disk.params:%s/%d" % (key, idx), value))
3213 return changes
3215 def _RemoveDisk(self, idx, root, _):
3216 """Removes a disk.
3218 """
3219 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3220 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3221 self.cfg.SetDiskID(disk, node)
3222 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3223 if msg:
3224 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3225 " continuing anyway", idx, node, msg)
3227 # if this is a DRBD disk, return its port to the pool
3228 if root.dev_type in constants.LDS_DRBD:
3229 self.cfg.AddTcpUdpPort(root.logical_id[2])
3231 def _CreateNewNic(self, idx, params, private):
3232 """Creates data structure for a new network interface.
3235 mac = params[constants.INIC_MAC]
3236 ip = params.get(constants.INIC_IP, None)
3237 net = params.get(constants.INIC_NETWORK, None)
3238 name = params.get(constants.INIC_NAME, None)
3239 net_uuid = self.cfg.LookupNetwork(net)
3240 #TODO: not private.filled?? can a nic have no nicparams??
3241 nicparams = private.filled
3242 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3243 nicparams=nicparams)
3244 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3246 return (nobj, [
3247 ("nic.%d" % idx,
3248 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3249 (mac, ip, private.filled[constants.NIC_MODE],
3250 private.filled[constants.NIC_LINK],
3251 net)),
3252 ])
3254 def _ApplyNicMods(self, idx, nic, params, private):
3255 """Modifies a network interface.
3260 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3262 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3263 setattr(nic, key, params[key])
3265 new_net = params.get(constants.INIC_NETWORK, nic.network)
3266 new_net_uuid = self.cfg.LookupNetwork(new_net)
3267 if new_net_uuid != nic.network:
3268 changes.append(("nic.network/%d" % idx, new_net))
3269 nic.network = new_net_uuid
3271 if private.filled:
3272 nic.nicparams = private.filled
3274 for (key, val) in nic.nicparams.items():
3275 changes.append(("nic.%s/%d" % (key, idx), val))
3277 return changes
3279 def Exec(self, feedback_fn):
3280 """Modifies an instance.
3282 All parameters take effect only at the next restart of the instance.
3284 """
3285 # Process here the warnings from CheckPrereq, as we don't have a
3286 # feedback_fn there.
3287 # TODO: Replace with self.LogWarning
3288 for warn in self.warn:
3289 feedback_fn("WARNING: %s" % warn)
3291 assert ((self.op.disk_template is None) ^
3292 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3293 "Not owning any node resource locks"
3296 instance = self.instance
3300 instance.primary_node = self.op.pnode
3303 if self.op.runtime_mem:
3304 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3305 instance,
3306 self.op.runtime_mem)
3307 rpcres.Raise("Cannot modify instance runtime memory")
3308 result.append(("runtime_memory", self.op.runtime_mem))
3310 # Apply disk changes
3311 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3312 self._CreateNewDisk, self._ModifyDisk,
3313 self._RemoveDisk)
3314 _UpdateIvNames(0, instance.disks)
3316 if self.op.disk_template:
3317 if __debug__:
3318 check_nodes = set(instance.all_nodes)
3319 if self.op.remote_node:
3320 check_nodes.add(self.op.remote_node)
3321 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3322 owned = self.owned_locks(level)
3323 assert not (check_nodes - owned), \
3324 ("Not owning the correct locks, owning %r, expected at least %r" %
3325 (owned, check_nodes))
3327 r_shut = ShutdownInstanceDisks(self, instance)
3328 if not r_shut:
3329 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3330 " proceed with disk template conversion")
3331 mode = (instance.disk_template, self.op.disk_template)
3332 try:
3333 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3334 except:
3335 self.cfg.ReleaseDRBDMinors(instance.name)
3336 raise
3337 result.append(("disk_template", self.op.disk_template))
3339 assert instance.disk_template == self.op.disk_template, \
3340 ("Expected disk template '%s', found '%s'" %
3341 (self.op.disk_template, instance.disk_template))
3343 # Release node and resource locks if there are any (they might already have
3344 # been released during disk conversion)
3345 ReleaseLocks(self, locking.LEVEL_NODE)
3346 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3349 if self._new_nics is not None:
3350 instance.nics = self._new_nics
3351 result.extend(self._nic_chgdesc)
3354 if self.op.hvparams:
3355 instance.hvparams = self.hv_inst
3356 for key, val in self.op.hvparams.iteritems():
3357 result.append(("hv/%s" % key, val))
3360 if self.op.beparams:
3361 instance.beparams = self.be_inst
3362 for key, val in self.op.beparams.iteritems():
3363 result.append(("be/%s" % key, val))
3367 instance.os = self.op.os_name
3370 if self.op.osparams:
3371 instance.osparams = self.os_inst
3372 for key, val in self.op.osparams.iteritems():
3373 result.append(("os/%s" % key, val))
3375 if self.op.offline is None:
3376 # Ignore
3377 pass
3378 elif self.op.offline:
3379 # Mark instance as offline
3380 self.cfg.MarkInstanceOffline(instance.name)
3381 result.append(("admin_state", constants.ADMINST_OFFLINE))
3383 # Mark instance as online, but stopped
3384 self.cfg.MarkInstanceDown(instance.name)
3385 result.append(("admin_state", constants.ADMINST_DOWN))
3387 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3389 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3390 self.owned_locks(locking.LEVEL_NODE)), \
3391 "All node locks should have been released by now"
3395 _DISK_CONVERSIONS = {
3396 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3397 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3398 }
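# Editor-added sketch (not part of the original module): how the dispatch
# table above is consumed in Exec(). Unsupported template pairs are already
# rejected in CheckPrereq, so Exec uses a plain lookup; a defensive variant
# could look like this.
def _demo_dispatch_conversion(lu, old_template, new_template, feedback_fn):
  mode = (old_template, new_template)
  try:
    conv_fn = LUInstanceSetParams._DISK_CONVERSIONS[mode]
  except KeyError:
    raise errors.OpPrereqError("Unsupported disk template conversion from"
                               " %s to %s" % mode, errors.ECODE_INVAL)
  conv_fn(lu, feedback_fn)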
3401 class LUInstanceChangeGroup(LogicalUnit):
3402 HPATH = "instance-change-group"
3403 HTYPE = constants.HTYPE_INSTANCE
3404 REQ_BGL = False
3406 def ExpandNames(self):
3407 self.share_locks = ShareAll()
3409 self.needed_locks = {
3410 locking.LEVEL_NODEGROUP: [],
3411 locking.LEVEL_NODE: [],
3412 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3413 }
3415 self._ExpandAndLockInstance()
3417 if self.op.target_groups:
3418 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3419 self.op.target_groups)
3420 else:
3421 self.req_target_uuids = None
3423 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3425 def DeclareLocks(self, level):
3426 if level == locking.LEVEL_NODEGROUP:
3427 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3429 if self.req_target_uuids:
3430 lock_groups = set(self.req_target_uuids)
3432 # Lock all groups used by instance optimistically; this requires going
3433 # via the node before it's locked, requiring verification later on
3434 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3435 lock_groups.update(instance_groups)
3436 else:
3437 # No target groups, need to lock all of them
3438 lock_groups = locking.ALL_SET
3440 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3442 elif level == locking.LEVEL_NODE:
3443 if self.req_target_uuids:
3444 # Lock all nodes used by instances
3445 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3446 self._LockInstancesNodes()
3448 # Lock all nodes in all potential target groups
3449 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3450 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3451 member_nodes = [node_name
3452 for group in lock_groups
3453 for node_name in self.cfg.GetNodeGroup(group).members]
3454 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3455 else:
3456 # Lock all nodes as all groups are potential targets
3457 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3459 def CheckPrereq(self):
3460 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3461 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3462 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3464 assert (self.req_target_uuids is None or
3465 owned_groups.issuperset(self.req_target_uuids))
3466 assert owned_instances == set([self.op.instance_name])
3468 # Get instance information
3469 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3471 # Check if node groups for locked instance are still correct
3472 assert owned_nodes.issuperset(self.instance.all_nodes), \
3473 ("Instance %s's nodes changed while we kept the lock" %
3474 self.op.instance_name)
3476 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3477 owned_groups)
3479 if self.req_target_uuids:
3480 # User requested specific target groups
3481 self.target_uuids = frozenset(self.req_target_uuids)
3482 else:
3483 # All groups except those used by the instance are potential targets
3484 self.target_uuids = owned_groups - inst_groups
3486 conflicting_groups = self.target_uuids & inst_groups
3487 if conflicting_groups:
3488 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3489 " used by the instance '%s'" %
3490 (utils.CommaJoin(conflicting_groups),
3491 self.op.instance_name),
3492 errors.ECODE_INVAL)
3494 if not self.target_uuids:
3495 raise errors.OpPrereqError("There are no possible target groups",
3498 def BuildHooksEnv(self):
3499 """Build hooks env.
3501 """
3502 assert self.target_uuids
3504 env = {
3505 "TARGET_GROUPS": " ".join(self.target_uuids),
3506 }
3508 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3510 return env
3512 def BuildHooksNodes(self):
3513 """Build hooks nodes.
3516 mn = self.cfg.GetMasterNode()
3519 def Exec(self, feedback_fn):
3520 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3522 assert instances == [self.op.instance_name], "Instance not locked"
3524 req = iallocator.IAReqGroupChange(instances=instances,
3525 target_groups=list(self.target_uuids))
3526 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3528 ial.Run(self.op.iallocator)
3530 if not ial.success:
3531 raise errors.OpPrereqError("Can't compute solution for changing group of"
3532 " instance '%s' using iallocator '%s': %s" %
3533 (self.op.instance_name, self.op.iallocator,
3534 ial.info), errors.ECODE_NORES)
3536 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3538 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3539 " instance '%s'", len(jobs), self.op.instance_name)
3541 return ResultWithJobs(jobs)