# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
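# Per the type above, each reported change is a (name, value) pair with a
# non-empty string as the name; the value itself is unconstrained. A
# hypothetical example: [("nic.mode/0", "bridged")].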


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
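# For example, a request for "inst1" that resolves to "inst1.example.com" is
# accepted as a prefix match, while a resolution to "other.example.com" would
# raise OpPrereqError.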


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
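# A sketch with hypothetical values: with cluster defaults of {"maxmem": 512}
# and op.beparams of {"maxmem": "auto"}, the loop above substitutes 512 for
# "auto" before SimpleFillBE merges the opcode values over the cluster-wide
# defaults.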


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
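# A minimal sketch of the expected input, using hypothetical values: an entry
# like {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"} in op.nics
# passes through here with its "pool" marker intact; the actual address is
# only reserved in CheckPrereq, once the primary node is known.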


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
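# The _compute_fn keyword argument exists so unit tests can inject a stub; in
# normal use the result is whatever ComputeIPolicySpecViolation returns, i.e.
# a (possibly empty) list of human-readable violation descriptions that
# callers join into an error message.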


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get
      # from the iallocator, which wants the disk template as input. To solve
      # this chicken-and-egg problem, it should be possible to specify just a
      # node group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    if self.op.disk_template not in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)),
                                 errors.ECODE_STATE)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)

    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
        [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    # TODO: Export network to iallocator so that it chooses a pnode in a
    # nodegroup that has the desired network connected to it
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node %s's nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))
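      # Note on the payload shape relied upon above: each node_lvs entry maps
      # a "vg/lv" name to a tuple whose first element is the volume size
      # (hence the int(float(...)) conversion) and whose third element is the
      # online flag used for the online_lvs check.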

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }
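    # A sketch of the resulting dict, with hypothetical values for a two-disk
    # instance: {"memory-size": 512, "cpu-count": 1, "disk-count": 2,
    # "disk-size": [1024, 2048], "nic-count": 1, "spindle-use": 1}; the
    # ISPEC_* constants provide the string keys.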

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    # TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    # TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-and-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
1246 feedback_fn("* creating instance disks...")
1248 CreateDisks(self, iobj)
1249 except errors.OpExecError:
1250 self.LogWarning("Device creation failed")
1251 self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1254 feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1256 self.cfg.AddInstance(iobj, self.proc.GetECId())
1258 # Declare that we don't want to remove the instance lock anymore, as we've
1259 # added the instance to the config
1260 del self.remove_locks[locking.LEVEL_INSTANCE]
1262 if self.op.mode == constants.INSTANCE_IMPORT:
1263 # Release unused nodes
1264 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1267 ReleaseLocks(self, locking.LEVEL_NODE)
1270 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1271 feedback_fn("* wiping instance disks...")
1273 WipeDisks(self, iobj)
1274 except errors.OpExecError, err:
1275 logging.exception("Wiping disks failed")
1276 self.LogWarning("Wiping instance disks failed (%s)", err)
1280 # Something is already wrong with the disks, don't do anything else
1282 elif self.op.wait_for_sync:
1283 disk_abort = not WaitForSync(self, iobj)
1284 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1285 # make sure the disks are not degraded (still sync-ing is ok)
1286 feedback_fn("* checking mirrors status")
1287 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1292 RemoveDisks(self, iobj)
1293 self.cfg.RemoveInstance(iobj.uuid)
1294 # Make sure the instance lock gets removed
1295 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1296 raise errors.OpExecError("There are some degraded disks for"

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
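
# For illustration, an invocation that would end up in LUInstanceCreate (the
# exact option spelling depends on the installed version, and the names are
# hypothetical):
#   gnt-instance add -t drbd -o debootstrap+default -s 10G \
#     -n node1.example.com:node2.example.com inst1.example.com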


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name
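
# For illustration, a rename is typically driven from the CLI as something
# like "gnt-instance rename old-name.example.com new-name.example.com"
# (hypothetical names); the OS rename script then runs on the primary node.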


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
        self, target_node.uuid, "failing over instance %s" %
        self.instance.name, bep[constants.BE_MAXMEM],
        self.instance.hypervisor,
        self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1733 def Exec(self, feedback_fn):
1734 """Move an instance.
1736 The move is done by shutting it down on its present node, copying
1737 the data over (slow) and starting it on the new node.
1739 """
1740 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1741 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1743 self.LogInfo("Shutting down instance %s on source node %s",
1744 self.instance.name, source_node.name)
1746 assert (self.owned_locks(locking.LEVEL_NODE) ==
1747 self.owned_locks(locking.LEVEL_NODE_RES))
1749 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1750 self.op.shutdown_timeout,
1751 self.op.reason)
1752 if self.op.ignore_consistency:
1753 result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1754 " anyway. Please make sure node %s is down. Error details" %
1755 (self.instance.name, source_node.name, source_node.name),
1756 self.LogWarning)
1757 else:
1758 result.Raise("Could not shutdown instance %s on node %s" %
1759 (self.instance.name, source_node.name))
1761 # create the target disks
1762 try:
1763 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1764 except errors.OpExecError:
1765 self.LogWarning("Device creation failed")
1766 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1767 raise
1769 cluster_name = self.cfg.GetClusterInfo().cluster_name
1771 errs = []
1772 # activate, get path, copy the data over
1773 for idx, disk in enumerate(self.instance.disks):
1774 self.LogInfo("Copying data for disk %d", idx)
1775 result = self.rpc.call_blockdev_assemble(
1776 target_node.uuid, (disk, self.instance), self.instance.name,
1777 True, idx)
1778 if result.fail_msg:
1779 self.LogWarning("Can't assemble newly created disk %d: %s",
1780 idx, result.fail_msg)
1781 errs.append(result.fail_msg)
1782 continue
1783 dev_path = result.payload
1784 result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1785 self.instance),
1786 target_node.name, dev_path,
1787 cluster_name)
1788 if result.fail_msg:
1789 self.LogWarning("Can't copy data over for disk %d: %s",
1790 idx, result.fail_msg)
1791 errs.append(result.fail_msg)
1792 break
1794 if errs:
1795 self.LogWarning("Some disks failed to copy, aborting")
1796 try:
1797 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1798 finally:
1799 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1800 raise errors.OpExecError("Errors during disk copy: %s" %
1801 (",".join(errs),))
1803 self.instance.primary_node = target_node.uuid
1804 self.cfg.Update(self.instance, feedback_fn)
1806 self.LogInfo("Removing the disks on the original node")
1807 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1809 # Only start the instance if it's marked as up
1810 if self.instance.admin_state == constants.ADMINST_UP:
1811 self.LogInfo("Starting instance %s on node %s",
1812 self.instance.name, target_node.name)
1814 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1815 ignore_secondaries=True)
1816 if not disks_ok:
1817 ShutdownInstanceDisks(self, self.instance)
1818 raise errors.OpExecError("Can't activate the instance's disks")
1820 result = self.rpc.call_instance_start(target_node.uuid,
1821 (self.instance, None, None), False,
1822 self.op.reason)
1823 msg = result.fail_msg
1824 if msg:
1825 ShutdownInstanceDisks(self, self.instance)
1826 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1827 (self.instance.name, target_node.name, msg))
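# Editor's note: the Exec flow above is strictly ordered -- stop the instance
# on the source node, create empty disks on the target, stream each disk,
# flip primary_node in the configuration, and only then remove the source
# disks. A minimal standalone sketch of the error-accumulation pattern used
# there (hypothetical injected callables, not Ganeti's RPC API):
def _DemoMoveErrorHandling(shutdown_fn, create_disks_fn, copy_disk_fn, disks):
  errs = []
  shutdown_fn()
  create_disks_fn()
  for idx, disk in enumerate(disks):
    err = copy_disk_fn(idx, disk)  # None on success, message on failure
    if err:
      errs.append(err)
      break  # one failed copy aborts the whole move
  if errs:
    raise RuntimeError("Errors during disk copy: %s" % ",".join(errs))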
1830 class LUInstanceMultiAlloc(NoHooksLU):
1831 """Allocates multiple instances at the same time.
1836 def CheckArguments(self):
1841 for inst in self.op.instances:
1842 if inst.iallocator is not None:
1843 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1844 " instance objects", errors.ECODE_INVAL)
1845 nodes.append(bool(inst.pnode))
1846 if inst.disk_template in constants.DTS_INT_MIRROR:
1847 nodes.append(bool(inst.snode))
1849 has_nodes = compat.any(nodes)
1850 if compat.all(nodes) ^ has_nodes:
1851 raise errors.OpPrereqError("There are instance objects providing"
1852 " pnode/snode while others do not",
1853 errors.ECODE_INVAL)
1855 if self.op.iallocator is None:
1856 default_iallocator = self.cfg.GetDefaultIAllocator()
1857 if default_iallocator and has_nodes:
1858 self.op.iallocator = default_iallocator
1859 else:
1860 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1861 " given and no cluster-wide default"
1862 " iallocator found; please specify either"
1863 " an iallocator or nodes on the instances"
1864 " or set a cluster-wide default iallocator",
1865 errors.ECODE_INVAL)
1867 _CheckOpportunisticLocking(self.op)
1869 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1870 if dups:
1871 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1872 utils.CommaJoin(dups), errors.ECODE_INVAL)
1874 def ExpandNames(self):
1875 """Calculate the locks.
1878 self.share_locks = ShareAll()
1879 self.needed_locks = {
1880 # iallocator will select nodes and even if no iallocator is used,
1881 # collisions with LUInstanceCreate should be avoided
1882 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1885 if self.op.iallocator:
1886 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1887 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1889 if self.op.opportunistic_locking:
1890 self.opportunistic_locks[locking.LEVEL_NODE] = True
1891 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1892 else:
1893 nodeslist = []
1894 for inst in self.op.instances:
1895 (inst.pnode_uuid, inst.pnode) = \
1896 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1897 nodeslist.append(inst.pnode)
1898 if inst.snode is not None:
1899 (inst.snode_uuid, inst.snode) = \
1900 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1901 nodeslist.append(inst.snode)
1903 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1904 # Lock resources of instance's primary and secondary nodes (copy to
1905 # prevent accidental modification)
1906 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1908 def CheckPrereq(self):
1909 """Check prerequisite.
1912 cluster = self.cfg.GetClusterInfo()
1913 default_vg = self.cfg.GetVGName()
1914 ec_id = self.proc.GetECId()
1916 if self.op.opportunistic_locking:
1917 # Only consider nodes for which a lock is held
1918 node_whitelist = self.cfg.GetNodeNames(
1919 list(self.owned_locks(locking.LEVEL_NODE)))
1920 else:
1921 node_whitelist = None
1923 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1924 _ComputeNics(op, cluster, None,
1925 self.cfg, ec_id),
1926 _ComputeFullBeParams(op, cluster),
1927 node_whitelist)
1928 for op in self.op.instances]
1930 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1931 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1933 ial.Run(self.op.iallocator)
1935 if not ial.success:
1936 raise errors.OpPrereqError("Can't compute nodes using"
1937 " iallocator '%s': %s" %
1938 (self.op.iallocator, ial.info),
1939 errors.ECODE_NORES)
1941 self.ia_result = ial.result
1943 if self.op.dry_run:
1944 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1945 constants.JOB_IDS_KEY: [],
1946 })
1948 def _ConstructPartialResult(self):
1949 """Contructs the partial result.
1952 (allocatable, failed) = self.ia_result
1954 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1955 map(compat.fst, allocatable),
1956 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1959 def Exec(self, feedback_fn):
1960 """Executes the opcode.
1963 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1964 (allocatable, failed) = self.ia_result
1967 for (name, node_names) in allocatable:
1968 op = op2inst.pop(name)
1970 (op.pnode_uuid, op.pnode) = \
1971 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1972 if len(node_names) > 1:
1973 (op.snode_uuid, op.snode) = \
1974 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1976 jobs.append([op])
1978 missing = set(op2inst.keys()) - set(failed)
1979 assert not missing, \
1980 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1982 return ResultWithJobs(jobs, **self._ConstructPartialResult())
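# Editor's note: a hedged sketch of the bookkeeping Exec relies on -- every
# requested instance must end up either in 'allocatable' (paired with its
# chosen node names) or in 'failed'; anything left over trips the assertion
# above. Plain Python, no cluster required:
def _DemoMultiAllocPartition():
  requested = set(["inst1", "inst2", "inst3"])
  allocatable = [("inst1", ["node1"]), ("inst2", ["node1", "node2"])]
  failed = ["inst3"]
  missing = requested - set(name for (name, _) in allocatable) - set(failed)
  assert not missing, "incomplete result: %s" % ",".join(sorted(missing))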
1985 class _InstNicModPrivate:
1986 """Data structure for network interface modifications.
1988 Used by L{LUInstanceSetParams}.
1990 """
1991 def __init__(self):
1992 self.params = None
1993 self.filled = None
1996 def _PrepareContainerMods(mods, private_fn):
1997 """Prepares a list of container modifications by adding a private data field.
1999 @type mods: list of tuples; (operation, index, parameters)
2000 @param mods: List of modifications
2001 @type private_fn: callable or None
2002 @param private_fn: Callable for constructing a private data field for a
2003 modification
2004 @rtype: list
2006 """
2007 if private_fn is None:
2008 fn = lambda: None
2009 else:
2010 fn = private_fn
2012 return [(op, idx, params, fn()) for (op, idx, params) in mods]
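# Editor's note: an illustrative usage sketch of _PrepareContainerMods. Each
# (op, index, params) tuple gains a fourth element holding private data; with
# private_fn=None that element is simply None (dict is used below only as a
# trivial stand-in factory):
def _DemoPrepareContainerMods():
  mods = [("add", -1, {"size": 1024}), ("modify", 0, {"mode": "ro"})]
  prepared = _PrepareContainerMods(mods, dict)
  assert prepared[0] == ("add", -1, {"size": 1024}, {})
  assert _PrepareContainerMods(mods, None)[0][3] is None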
2015 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2016 """Checks if nodes have enough physical CPUs
2018 This function checks if all given nodes have the needed number of
2019 physical CPUs. In case any node has fewer CPUs or we cannot get the
2020 information from the node, this function raises an OpPrereqError
2021 exception.
2023 @type lu: C{LogicalUnit}
2024 @param lu: a logical unit from which we get configuration data
2025 @type node_uuids: C{list}
2026 @param node_uuids: the list of node UUIDs to check
2027 @type requested: C{int}
2028 @param requested: the minimum acceptable number of physical CPUs
2029 @type hypervisor_specs: list of pairs (string, dict of strings)
2030 @param hypervisor_specs: list of hypervisor specifications in
2031 pairs (hypervisor_name, hvparams)
2032 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2033 or we cannot check the node
2035 """
2036 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2037 for node_uuid in node_uuids:
2038 info = nodeinfo[node_uuid]
2039 node_name = lu.cfg.GetNodeName(node_uuid)
2040 info.Raise("Cannot get current information from node %s" % node_name,
2041 prereq=True, ecode=errors.ECODE_ENVIRON)
2042 (_, _, (hv_info, )) = info.payload
2043 num_cpus = hv_info.get("cpu_total", None)
2044 if not isinstance(num_cpus, int):
2045 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2046 " on node %s, result was '%s'" %
2047 (node_name, num_cpus), errors.ECODE_ENVIRON)
2048 if requested > num_cpus:
2049 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2050 "required" % (node_name, num_cpus, requested),
2054 def GetItemFromContainer(identifier, kind, container):
2055 """Return the item refered by the identifier.
2057 @type identifier: string
2058 @param identifier: Item index or name or UUID
2060 @param kind: One-word item description
2061 @type container: list
2062 @param container: Container to get the item from
2065 try:
2066 # Index
2067 idx = int(identifier)
2068 if idx == -1:
2069 # Append
2070 absidx = len(container) - 1
2071 elif idx < 0:
2072 raise IndexError("Not accepting negative indices other than -1")
2073 elif idx > len(container):
2074 raise IndexError("Got %s index %s, but there are only %s" %
2075 (kind, idx, len(container)))
2076 else:
2077 absidx = idx
2078 return (absidx, container[idx])
2079 except ValueError:
2080 pass
2082 for idx, item in enumerate(container):
2083 if item.uuid == identifier or item.name == identifier:
2084 return (idx, item)
2086 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2087 (kind, identifier), errors.ECODE_NOENT)
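# Editor's note: identifier resolution in GetItemFromContainer -- integers
# (with -1 meaning "last item") are treated as indices, anything else is
# matched against item.uuid and item.name. A sketch with toy items:
def _DemoGetItemFromContainer():
  class _Item:
    def __init__(self, uuid, name):
      self.uuid = uuid
      self.name = name
  container = [_Item("uuid-1", "boot"), _Item("uuid-2", "data")]
  assert GetItemFromContainer("0", "disk", container)[1].name == "boot"
  assert GetItemFromContainer("-1", "disk", container)[0] == 1
  assert GetItemFromContainer("data", "disk", container)[1].uuid == "uuid-2"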
2090 def _ApplyContainerMods(kind, container, chgdesc, mods,
2091 create_fn, modify_fn, remove_fn):
2092 """Applies descriptions in C{mods} to C{container}.
2095 @param kind: One-word item description
2096 @type container: list
2097 @param container: Container to modify
2098 @type chgdesc: None or list
2099 @param chgdesc: List of applied changes
2101 @param mods: Modifications as returned by L{_PrepareContainerMods}
2102 @type create_fn: callable
2103 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2104 receives absolute item index, parameters and private data object as added
2105 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2107 @type modify_fn: callable
2108 @param modify_fn: Callback for modifying an existing item
2109 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2110 and private data object as added by L{_PrepareContainerMods}, returns
2112 @type remove_fn: callable
2113 @param remove_fn: Callback on removing item; receives absolute item index,
2114 item and private data object as added by L{_PrepareContainerMods}
2117 for (op, identifier, params, private) in mods:
2120 if op == constants.DDM_ADD:
2121 # Calculate where item will be added
2122 # When adding an item, identifier can only be an index
2123 try:
2124 idx = int(identifier)
2125 except ValueError:
2126 raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
2127 " identifier for %s" % constants.DDM_ADD,
2128 errors.ECODE_INVAL)
2129 if idx == -1:
2130 addidx = len(container)
2131 else:
2132 if idx < 0:
2133 raise IndexError("Not accepting negative indices other than -1")
2134 elif idx > len(container):
2135 raise IndexError("Got %s index %s, but there are only %s" %
2136 (kind, idx, len(container)))
2138 addidx = idx
2139 if create_fn is None:
2140 item = params
2141 else:
2142 (item, changes) = create_fn(addidx, params, private)
2144 if idx == -1:
2145 container.append(item)
2146 else:
2148 assert idx <= len(container)
2149 # list.insert does so before the specified index
2150 container.insert(idx, item)
2151 else:
2152 # Retrieve existing item
2153 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2155 if op == constants.DDM_REMOVE:
2156 assert not params
2158 if remove_fn is not None:
2159 remove_fn(absidx, item, private)
2161 changes = [("%s/%s" % (kind, absidx), "remove")]
2163 assert container[absidx] == item
2164 del container[absidx]
2165 elif op == constants.DDM_MODIFY:
2166 if modify_fn is not None:
2167 changes = modify_fn(absidx, item, params, private)
2168 else:
2169 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2171 assert _TApplyContModsCbChanges(changes)
2173 if not (chgdesc is None or changes is None):
2174 chgdesc.extend(changes)
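# Editor's note: an end-to-end sketch wiring _PrepareContainerMods into
# _ApplyContainerMods with a toy create callback; change descriptions
# accumulate in 'chgdesc' exactly as CheckPrereq/Exec consume them later.
# The _Item class and _create callback are illustrative stand-ins:
def _DemoApplyContainerMods():
  class _Item:
    def __init__(self, uuid, name):
      self.uuid = uuid
      self.name = name
  def _create(idx, params, private):
    return (_Item("uuid-new", params["name"]), [("disk/%d" % idx, "add")])
  container = [_Item("uuid-1", "first")]
  chgdesc = []
  mods = _PrepareContainerMods([(constants.DDM_ADD, -1, {"name": "new"})],
                               None)
  _ApplyContainerMods("disk", container, chgdesc, mods, _create, None, None)
  assert len(container) == 2 and chgdesc == [("disk/1", "add")]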
2177 def _UpdateIvNames(base_index, disks):
2178 """Updates the C{iv_name} attribute of disks.
2180 @type disks: list of L{objects.Disk}
2183 for (idx, disk) in enumerate(disks):
2184 disk.iv_name = "disk/%s" % (base_index + idx, )
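# Editor's note: trivial but easy to get wrong -- iv_name is recomputed from
# the base index, so removing a disk renumbers every disk that follows it:
def _DemoUpdateIvNames():
  class _Disk:
    iv_name = None
  disks = [_Disk(), _Disk(), _Disk()]
  _UpdateIvNames(0, disks)
  assert [d.iv_name for d in disks] == ["disk/0", "disk/1", "disk/2"]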
2187 class LUInstanceSetParams(LogicalUnit):
2188 """Modifies an instances's parameters.
2191 HPATH = "instance-modify"
2192 HTYPE = constants.HTYPE_INSTANCE
2196 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2197 assert ht.TList(mods)
2198 assert not mods or len(mods[0]) in (2, 3)
2200 if mods and len(mods[0]) == 2:
2201 result = []
2203 addremove = 0
2204 for op, params in mods:
2205 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2206 result.append((op, -1, params))
2207 addremove += 1
2209 if addremove > 1:
2210 raise errors.OpPrereqError("Only one %s add or remove operation is"
2211 " supported at a time" % kind,
2212 errors.ECODE_INVAL)
2213 else:
2214 result.append((constants.DDM_MODIFY, op, params))
2216 assert verify_fn(result)
2217 else:
2218 result = mods
2220 return result
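# Editor's note (illustrative sketch, kept as a comment so the class body is
# unchanged): the upgrade turns legacy 2-tuples into (op, index, params)
# 3-tuples; add/remove get index -1, anything else becomes a modify of that
# identifier. Assuming a pass-through verify_fn:
#   mods = [(constants.DDM_ADD, {"size": 512}), ("0", {"mode": "ro"})]
#   LUInstanceSetParams._UpgradeDiskNicMods("disk", mods, lambda r: True)
#   -> [(constants.DDM_ADD, -1, {"size": 512}),
#       (constants.DDM_MODIFY, "0", {"mode": "ro"})]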
2222 @staticmethod
2223 def _CheckMods(kind, mods, key_types, item_fn):
2224 """Ensures requested disk/NIC modifications are valid.
2226 """
2227 for (op, _, params) in mods:
2228 assert ht.TDict(params)
2230 # If 'key_types' is an empty dict, we assume we have an
2231 # 'ext' template and thus do not ForceDictType
2232 if key_types:
2233 utils.ForceDictType(params, key_types)
2235 if op == constants.DDM_REMOVE:
2236 if params:
2237 raise errors.OpPrereqError("No settings should be passed when"
2238 " removing a %s" % kind,
2239 errors.ECODE_INVAL)
2240 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2241 item_fn(op, params)
2242 else:
2243 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2245 @staticmethod
2246 def _VerifyDiskModification(op, params, excl_stor):
2247 """Verifies a disk modification.
2249 """
2250 if op == constants.DDM_ADD:
2251 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2252 if mode not in constants.DISK_ACCESS_SET:
2253 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2254 errors.ECODE_INVAL)
2256 size = params.get(constants.IDISK_SIZE, None)
2257 if size is None:
2258 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2259 constants.IDISK_SIZE, errors.ECODE_INVAL)
2261 try:
2262 size = int(size)
2263 except (TypeError, ValueError), err:
2264 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2265 errors.ECODE_INVAL)
2267 params[constants.IDISK_SIZE] = size
2268 name = params.get(constants.IDISK_NAME, None)
2269 if name is not None and name.lower() == constants.VALUE_NONE:
2270 params[constants.IDISK_NAME] = None
2272 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2274 elif op == constants.DDM_MODIFY:
2275 if constants.IDISK_SIZE in params:
2276 raise errors.OpPrereqError("Disk size change not possible, use"
2277 " grow-disk", errors.ECODE_INVAL)
2278 if len(params) > 2:
2279 raise errors.OpPrereqError("Disk modification doesn't support"
2280 " additional arbitrary parameters",
2281 errors.ECODE_INVAL)
2282 name = params.get(constants.IDISK_NAME, None)
2283 if name is not None and name.lower() == constants.VALUE_NONE:
2284 params[constants.IDISK_NAME] = None
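# Editor's note -- illustrative outcomes of the disk checks above (comment
# sketch, not executed):
#   add,    {constants.IDISK_SIZE: "1024"}  -> accepted; mode defaults to
#                                              constants.DISK_RDWR and size is
#                                              normalised to int 1024
#   add,    {}                              -> OpPrereqError, size is required
#   modify, {constants.IDISK_SIZE: 2048}    -> OpPrereqError, use grow-disk
#   modify, {constants.IDISK_NAME: "none"}  -> name is reset to None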
2286 @staticmethod
2287 def _VerifyNicModification(op, params):
2288 """Verifies a network interface modification.
2290 """
2291 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2292 ip = params.get(constants.INIC_IP, None)
2293 name = params.get(constants.INIC_NAME, None)
2294 req_net = params.get(constants.INIC_NETWORK, None)
2295 link = params.get(constants.NIC_LINK, None)
2296 mode = params.get(constants.NIC_MODE, None)
2297 if name is not None and name.lower() == constants.VALUE_NONE:
2298 params[constants.INIC_NAME] = None
2299 if req_net is not None:
2300 if req_net.lower() == constants.VALUE_NONE:
2301 params[constants.INIC_NETWORK] = None
2302 req_net = None
2303 elif link is not None or mode is not None:
2304 raise errors.OpPrereqError("If network is given, mode or link"
2305 " should not be set",
2306 errors.ECODE_INVAL)
2308 if op == constants.DDM_ADD:
2309 macaddr = params.get(constants.INIC_MAC, None)
2310 if macaddr is None:
2311 params[constants.INIC_MAC] = constants.VALUE_AUTO
2313 if ip is not None:
2314 if ip.lower() == constants.VALUE_NONE:
2315 params[constants.INIC_IP] = None
2316 else:
2317 if ip.lower() == constants.NIC_IP_POOL:
2318 if op == constants.DDM_ADD and req_net is None:
2319 raise errors.OpPrereqError("If ip=pool, parameter network"
2320 " must be passed too",
2321 errors.ECODE_INVAL)
2322 else:
2323 if not netutils.IPAddress.IsValid(ip):
2324 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2325 errors.ECODE_INVAL)
2327 if constants.INIC_MAC in params:
2328 macaddr = params[constants.INIC_MAC]
2329 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2330 macaddr = utils.NormalizeAndValidateMac(macaddr)
2332 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2333 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2334 " modifying an existing NIC",
2335 errors.ECODE_INVAL)
2337 def CheckArguments(self):
2338 if not (self.op.nics or self.op.disks or self.op.disk_template or
2339 self.op.hvparams or self.op.beparams or self.op.os_name or
2340 self.op.offline is not None or self.op.runtime_mem or
2341 self.op.osparams or self.op.pnode):
2342 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2344 if self.op.hvparams:
2345 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2346 "hypervisor", "instance", "cluster")
2348 self.op.disks = self._UpgradeDiskNicMods(
2349 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2350 self.op.nics = self._UpgradeDiskNicMods(
2351 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2353 if self.op.disks and self.op.disk_template is not None:
2354 raise errors.OpPrereqError("Disk template conversion and other disk"
2355 " changes not supported at the same time",
2358 if (self.op.disk_template and
2359 self.op.disk_template in constants.DTS_INT_MIRROR and
2360 self.op.remote_node is None):
2361 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2362 " one requires specifying a secondary node",
2365 # Check NIC modifications
2366 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2367 self._VerifyNicModification)
2369 if self.op.pnode:
2370 (self.op.pnode_uuid, self.op.pnode) = \
2371 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2373 def ExpandNames(self):
2374 self._ExpandAndLockInstance()
2375 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2376 # Can't even acquire node locks in shared mode as upcoming changes in
2377 # Ganeti 2.6 will start to modify the node object on disk conversion
2378 self.needed_locks[locking.LEVEL_NODE] = []
2379 self.needed_locks[locking.LEVEL_NODE_RES] = []
2380 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2381 # Lock the node group to look up the ipolicy
2382 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2384 def DeclareLocks(self, level):
2385 if level == locking.LEVEL_NODEGROUP:
2386 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2387 # Acquire locks for the instance's nodegroups optimistically. Needs
2388 # to be verified in CheckPrereq
2389 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2390 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2391 elif level == locking.LEVEL_NODE:
2392 self._LockInstancesNodes()
2393 if self.op.disk_template and self.op.remote_node:
2394 (self.op.remote_node_uuid, self.op.remote_node) = \
2395 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2396 self.op.remote_node)
2397 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2398 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2400 self.needed_locks[locking.LEVEL_NODE_RES] = \
2401 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2403 def BuildHooksEnv(self):
2404 """Build hooks env.
2406 This runs on the master, primary and secondaries.
2408 """
2409 args = {}
2410 if constants.BE_MINMEM in self.be_new:
2411 args["minmem"] = self.be_new[constants.BE_MINMEM]
2412 if constants.BE_MAXMEM in self.be_new:
2413 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2414 if constants.BE_VCPUS in self.be_new:
2415 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2416 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2417 # information at all.
2419 if self._new_nics is not None:
2420 nics = []
2422 for nic in self._new_nics:
2423 n = copy.deepcopy(nic)
2424 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2425 n.nicparams = nicparams
2426 nics.append(NICToTuple(self, n))
2428 args["nics"] = nics
2430 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2431 if self.op.disk_template:
2432 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2433 if self.op.runtime_mem:
2434 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2436 return env
2438 def BuildHooksNodes(self):
2439 """Build hooks nodes.
2442 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2445 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2446 old_params, cluster, pnode_uuid):
2448 update_params_dict = dict([(key, params[key])
2449 for key in constants.NICS_PARAMETERS
2450 if key in params])
2452 req_link = update_params_dict.get(constants.NIC_LINK, None)
2453 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2455 new_net_uuid = None
2456 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2457 if new_net_uuid_or_name:
2458 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2459 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2461 if old_net_uuid:
2462 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2464 if new_net_uuid:
2465 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2466 if not netparams:
2467 raise errors.OpPrereqError("No netparams found for the network"
2468 " %s, probably not connected" %
2469 new_net_obj.name, errors.ECODE_INVAL)
2470 new_params = dict(netparams)
2471 else:
2472 new_params = GetUpdatedParams(old_params, update_params_dict)
2474 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2476 new_filled_params = cluster.SimpleFillNIC(new_params)
2477 objects.NIC.CheckParameterSyntax(new_filled_params)
2479 new_mode = new_filled_params[constants.NIC_MODE]
2480 if new_mode == constants.NIC_MODE_BRIDGED:
2481 bridge = new_filled_params[constants.NIC_LINK]
2482 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2483 if msg:
2484 msg = "Error checking bridges on node '%s': %s" % \
2485 (self.cfg.GetNodeName(pnode_uuid), msg)
2486 if self.op.force:
2487 self.warn.append(msg)
2488 else:
2489 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2491 elif new_mode == constants.NIC_MODE_ROUTED:
2492 ip = params.get(constants.INIC_IP, old_ip)
2493 if ip is None:
2494 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2495 " on a routed NIC", errors.ECODE_INVAL)
2497 elif new_mode == constants.NIC_MODE_OVS:
2498 # TODO: check OVS link
2499 self.LogInfo("OVS links are currently not checked for correctness")
2501 if constants.INIC_MAC in params:
2502 mac = params[constants.INIC_MAC]
2503 if mac is None:
2504 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2505 errors.ECODE_INVAL)
2506 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2507 # otherwise generate the MAC address
2508 params[constants.INIC_MAC] = \
2509 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2510 else:
2511 # or validate/reserve the current one
2512 try:
2513 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2514 except errors.ReservationError:
2515 raise errors.OpPrereqError("MAC address '%s' already in use"
2516 " in cluster" % mac,
2517 errors.ECODE_NOTUNIQUE)
2518 elif new_net_uuid != old_net_uuid:
2520 def get_net_prefix(net_uuid):
2521 mac_prefix = None
2522 if net_uuid:
2523 nobj = self.cfg.GetNetwork(net_uuid)
2524 mac_prefix = nobj.mac_prefix
2526 return mac_prefix
2528 new_prefix = get_net_prefix(new_net_uuid)
2529 old_prefix = get_net_prefix(old_net_uuid)
2530 if old_prefix != new_prefix:
2531 params[constants.INIC_MAC] = \
2532 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2534 # if there is a change in (ip, network) tuple
2535 new_ip = params.get(constants.INIC_IP, old_ip)
2536 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2537 if new_ip:
2538 # if IP is pool then require a network and generate one IP
2539 if new_ip.lower() == constants.NIC_IP_POOL:
2540 if new_net_uuid:
2541 try:
2542 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2543 except errors.ReservationError:
2544 raise errors.OpPrereqError("Unable to get a free IP"
2545 " from the address pool",
2546 errors.ECODE_STATE)
2547 self.LogInfo("Chose IP %s from network %s",
2548 new_ip, new_net_obj.name)
2550 params[constants.INIC_IP] = new_ip
2551 else:
2552 raise errors.OpPrereqError("ip=pool, but no network found",
2553 errors.ECODE_INVAL)
2554 # Reserve new IP if it is in the new network, if any
2555 elif new_net_uuid:
2556 try:
2557 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2558 self.LogInfo("Reserving IP %s in network %s",
2559 new_ip, new_net_obj.name)
2560 except errors.ReservationError:
2561 raise errors.OpPrereqError("IP %s not available in network %s" %
2562 (new_ip, new_net_obj.name),
2563 errors.ECODE_NOTUNIQUE)
2564 # new network is None so check if new IP is a conflicting IP
2565 elif self.op.conflicts_check:
2566 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2568 # release old IP if old network is not None
2569 if old_ip and old_net_uuid:
2570 try:
2571 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2572 except errors.AddressPoolError:
2573 logging.warning("Released IP %s not contained in network %s",
2574 old_ip, old_net_obj.name)
2576 # there are no changes in (ip, network) tuple and old network is not None
2577 elif (old_net_uuid is not None and
2578 (req_link is not None or req_mode is not None)):
2579 raise errors.OpPrereqError("Not allowed to change link or mode of"
2580 " a NIC that is connected to a network",
2583 private.params = new_params
2584 private.filled = new_filled_params
2586 def _PreCheckDiskTemplate(self, pnode_info):
2587 """CheckPrereq checks related to a new disk template."""
2588 # Arguments are passed to avoid configuration lookups
2589 pnode_uuid = self.instance.primary_node
2590 if self.instance.disk_template == self.op.disk_template:
2591 raise errors.OpPrereqError("Instance already has disk template %s" %
2592 self.instance.disk_template,
2593 errors.ECODE_INVAL)
2595 if (self.instance.disk_template,
2596 self.op.disk_template) not in self._DISK_CONVERSIONS:
2597 raise errors.OpPrereqError("Unsupported disk template conversion from"
2598 " %s to %s" % (self.instance.disk_template,
2599 self.op.disk_template),
2600 errors.ECODE_INVAL)
2601 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2602 msg="cannot change disk template")
2603 if self.op.disk_template in constants.DTS_INT_MIRROR:
2604 if self.op.remote_node_uuid == pnode_uuid:
2605 raise errors.OpPrereqError("Given new secondary node %s is the same"
2606 " as the primary node of the instance" %
2607 self.op.remote_node, errors.ECODE_STATE)
2608 CheckNodeOnline(self, self.op.remote_node_uuid)
2609 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2610 # FIXME: here we assume that the old instance type is DT_PLAIN
2611 assert self.instance.disk_template == constants.DT_PLAIN
2612 disks = [{constants.IDISK_SIZE: d.size,
2613 constants.IDISK_VG: d.logical_id[0]}
2614 for d in self.instance.disks]
2615 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2616 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2618 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2619 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2620 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2621 snode_group)
2622 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2623 ignore=self.op.ignore_ipolicy)
2624 if pnode_info.group != snode_info.group:
2625 self.LogWarning("The primary and secondary nodes are in two"
2626 " different node groups; the disk parameters"
2627 " from the first disk's node group will be"
2628 " used")
2630 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2631 # Make sure none of the nodes require exclusive storage
2632 nodes = [pnode_info]
2633 if self.op.disk_template in constants.DTS_INT_MIRROR:
2634 assert snode_info
2635 nodes.append(snode_info)
2636 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2637 if compat.any(map(has_es, nodes)):
2638 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2639 " storage is enabled" % (self.instance.disk_template,
2640 self.op.disk_template))
2641 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2643 def _PreCheckDisks(self, ispec):
2644 """CheckPrereq checks related to disk changes.
2647 @param ispec: instance specs to be updated with the new disks
2650 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2652 excl_stor = compat.any(
2653 rpc.GetExclusiveStorageForNodes(self.cfg,
2654 self.instance.all_nodes).values()
2657 # Check disk modifications. This is done here and not in CheckArguments
2658 # (as with NICs), because we need to know the instance's disk template
2659 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2660 if self.instance.disk_template == constants.DT_EXT:
2661 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2662 else:
2663 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2664 ver_fn)
2666 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2668 # Check the validity of the `provider' parameter
2669 if self.instance.disk_template == constants.DT_EXT:
2670 for mod in self.diskmod:
2671 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2672 if mod[0] == constants.DDM_ADD:
2673 if ext_provider is None:
2674 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2675 " '%s' missing, during disk add" %
2676 (constants.DT_EXT,
2677 constants.IDISK_PROVIDER),
2678 errors.ECODE_NOENT)
2679 elif mod[0] == constants.DDM_MODIFY:
2680 if ext_provider:
2681 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2682 " modification" %
2683 constants.IDISK_PROVIDER,
2684 errors.ECODE_INVAL)
2685 else:
2686 for mod in self.diskmod:
2687 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2688 if ext_provider is not None:
2689 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2690 " instances of type '%s'" %
2691 (constants.IDISK_PROVIDER,
2692 constants.DT_EXT),
2693 errors.ECODE_INVAL)
2695 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2696 raise errors.OpPrereqError("Disk operations not supported for"
2697 " diskless instances", errors.ECODE_INVAL)
2699 def _PrepareDiskMod(_, disk, params, __):
2700 disk.name = params.get(constants.IDISK_NAME, None)
2702 # Verify disk changes (operating on a copy)
2703 disks = copy.deepcopy(self.instance.disks)
2704 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2705 _PrepareDiskMod, None)
2706 utils.ValidateDeviceNames("disk", disks)
2707 if len(disks) > constants.MAX_DISKS:
2708 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2709 " more" % constants.MAX_DISKS,
2710 errors.ECODE_STATE)
2711 disk_sizes = [disk.size for disk in self.instance.disks]
2712 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2713 self.diskmod if op == constants.DDM_ADD)
2714 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2715 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2717 if self.op.offline is not None and self.op.offline:
2718 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2719 msg="can't change to offline")
2721 def CheckPrereq(self):
2722 """Check prerequisites.
2724 This only checks the instance list against the existing names.
2726 """
2727 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2728 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2729 self.cluster = self.cfg.GetClusterInfo()
2731 assert self.instance is not None, \
2732 "Cannot retrieve locked instance %s" % self.op.instance_name
2734 pnode_uuid = self.instance.primary_node
2736 self.warn = []
2738 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2739 not self.op.force):
2740 # verify that the instance is not up
2741 instance_info = self.rpc.call_instance_info(
2742 pnode_uuid, self.instance.name, self.instance.hypervisor,
2743 self.instance.hvparams)
2744 if instance_info.fail_msg:
2745 self.warn.append("Can't get instance runtime information: %s" %
2746 instance_info.fail_msg)
2747 elif instance_info.payload:
2748 raise errors.OpPrereqError("Instance is still running on %s" %
2749 self.cfg.GetNodeName(pnode_uuid),
2750 errors.ECODE_STATE)
2752 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2753 node_uuids = list(self.instance.all_nodes)
2754 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2756 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2757 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2758 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2760 # dictionary with instance information after the modification
2761 ispec = {}
2763 # Prepare NIC modifications
2764 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2766 # OS change
2767 if self.op.os_name and not self.op.force:
2768 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2769 self.op.force_variant)
2770 instance_os = self.op.os_name
2771 else:
2772 instance_os = self.instance.os
2774 assert not (self.op.disk_template and self.op.disks), \
2775 "Can't modify disk template and apply disk changes at the same time"
2777 if self.op.disk_template:
2778 self._PreCheckDiskTemplate(pnode_info)
2780 self._PreCheckDisks(ispec)
2782 # hvparams processing
2783 if self.op.hvparams:
2784 hv_type = self.instance.hypervisor
2785 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2786 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2787 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2790 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2791 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2792 self.hv_proposed = self.hv_new = hv_new # the new actual values
2793 self.hv_inst = i_hvdict # the new dict (without defaults)
2794 else:
2795 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2796 self.instance.os,
2797 self.instance.hvparams)
2798 self.hv_new = self.hv_inst = {}
2800 # beparams processing
2801 if self.op.beparams:
2802 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2803 use_none=True)
2804 objects.UpgradeBeParams(i_bedict)
2805 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2806 be_new = self.cluster.SimpleFillBE(i_bedict)
2807 self.be_proposed = self.be_new = be_new # the new actual values
2808 self.be_inst = i_bedict # the new dict (without defaults)
2809 else:
2810 self.be_new = self.be_inst = {}
2811 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2812 be_old = self.cluster.FillBE(self.instance)
2814 # CPU param validation -- checking every time a parameter is
2815 # changed to cover all cases where either CPU mask or vcpus have
2816 # changed
2817 if (constants.BE_VCPUS in self.be_proposed and
2818 constants.HV_CPU_MASK in self.hv_proposed):
2819 cpu_list = \
2820 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2821 # Verify mask is consistent with number of vCPUs. Can skip this
2822 # test if only 1 entry in the CPU mask, which means same mask
2823 # is applied to all vCPUs.
2824 if (len(cpu_list) > 1 and
2825 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2826 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2827 " CPU mask [%s]" %
2828 (self.be_proposed[constants.BE_VCPUS],
2829 self.hv_proposed[constants.HV_CPU_MASK]),
2830 errors.ECODE_INVAL)
2832 # Only perform this test if a new CPU mask is given
2833 if constants.HV_CPU_MASK in self.hv_new:
2834 # Calculate the largest CPU number requested
2835 max_requested_cpu = max(map(max, cpu_list))
2836 # Check that all of the instance's nodes have enough physical CPUs to
2837 # satisfy the requested CPU mask
2838 hvspecs = [(self.instance.hypervisor,
2839 self.cfg.GetClusterInfo()
2840 .hvparams[self.instance.hypervisor])]
2841 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2842 max_requested_cpu + 1,
2843 hvspecs)
2845 # osparams processing
2846 if self.op.osparams:
2847 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2848 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2849 self.os_inst = i_osdict # the new dict (without defaults)
2850 else:
2851 self.os_inst = {}
2853 #TODO(dynmem): do the appropriate check involving MINMEM
2854 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2855 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2856 mem_check_list = [pnode_uuid]
2857 if be_new[constants.BE_AUTO_BALANCE]:
2858 # either we changed auto_balance to yes or it was from before
2859 mem_check_list.extend(self.instance.secondary_nodes)
2860 instance_info = self.rpc.call_instance_info(
2861 pnode_uuid, self.instance.name, self.instance.hypervisor,
2862 self.instance.hvparams)
2863 hvspecs = [(self.instance.hypervisor,
2864 self.cluster.hvparams[self.instance.hypervisor])]
2865 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2866 hvspecs)
2867 pninfo = nodeinfo[pnode_uuid]
2868 msg = pninfo.fail_msg
2869 if msg:
2870 # Assume the primary node is unreachable and go ahead
2871 self.warn.append("Can't get info from primary node %s: %s" %
2872 (self.cfg.GetNodeName(pnode_uuid), msg))
2873 else:
2874 (_, _, (pnhvinfo, )) = pninfo.payload
2875 if not isinstance(pnhvinfo.get("memory_free", None), int):
2876 self.warn.append("Node data from primary node %s doesn't contain"
2877 " free memory information" %
2878 self.cfg.GetNodeName(pnode_uuid))
2879 elif instance_info.fail_msg:
2880 self.warn.append("Can't get instance runtime information: %s" %
2881 instance_info.fail_msg)
2882 else:
2883 if instance_info.payload:
2884 current_mem = int(instance_info.payload["memory"])
2885 else:
2886 # Assume instance not running
2887 # (there is a slight race condition here, but it's not very
2888 # probable, and we have no other way to check)
2889 # TODO: Describe race condition
2890 current_mem = 0
2891 #TODO(dynmem): do the appropriate check involving MINMEM
2892 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2893 pnhvinfo["memory_free"])
2894 if miss_mem > 0:
2895 raise errors.OpPrereqError("This change will prevent the instance"
2896 " from starting, due to %d MB of memory"
2897 " missing on its primary node" %
2898 miss_mem, errors.ECODE_NORES)
2900 if be_new[constants.BE_AUTO_BALANCE]:
2901 for node_uuid, nres in nodeinfo.items():
2902 if node_uuid not in self.instance.secondary_nodes:
2903 continue
2904 nres.Raise("Can't get info from secondary node %s" %
2905 self.cfg.GetNodeName(node_uuid), prereq=True,
2906 ecode=errors.ECODE_STATE)
2907 (_, _, (nhvinfo, )) = nres.payload
2908 if not isinstance(nhvinfo.get("memory_free", None), int):
2909 raise errors.OpPrereqError("Secondary node %s didn't return free"
2910 " memory information" %
2911 self.cfg.GetNodeName(node_uuid),
2912 errors.ECODE_STATE)
2913 #TODO(dynmem): do the appropriate check involving MINMEM
2914 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2915 raise errors.OpPrereqError("This change will prevent the instance"
2916 " from failing over to its secondary node"
2917 " %s, due to not enough memory" %
2918 self.cfg.GetNodeName(node_uuid),
2919 errors.ECODE_STATE)
2921 if self.op.runtime_mem:
2922 remote_info = self.rpc.call_instance_info(
2923 self.instance.primary_node, self.instance.name,
2924 self.instance.hypervisor,
2925 self.cluster.hvparams[self.instance.hypervisor])
2926 remote_info.Raise("Error checking node %s" %
2927 self.cfg.GetNodeName(self.instance.primary_node))
2928 if not remote_info.payload: # not running already
2929 raise errors.OpPrereqError("Instance %s is not running" %
2930 self.instance.name, errors.ECODE_STATE)
2932 current_memory = remote_info.payload["memory"]
2933 if (not self.op.force and
2934 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2935 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2936 raise errors.OpPrereqError("Instance %s must have memory between %d"
2937 " and %d MB unless --force is given" %
2939 (self.instance.name,
2940 self.be_proposed[constants.BE_MINMEM],
2941 self.be_proposed[constants.BE_MAXMEM]),
2942 errors.ECODE_INVAL)
2944 delta = self.op.runtime_mem - current_memory
2945 if delta > 0:
2946 CheckNodeFreeMemory(
2947 self, self.instance.primary_node,
2948 "ballooning memory for instance %s" % self.instance.name, delta,
2949 self.instance.hypervisor,
2950 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
2952 # make self.cluster visible in the functions below
2953 cluster = self.cluster
2955 def _PrepareNicCreate(_, params, private):
2956 self._PrepareNicModification(params, private, None, None,
2957 {}, cluster, pnode_uuid)
2958 return (None, None)
2960 def _PrepareNicMod(_, nic, params, private):
2961 self._PrepareNicModification(params, private, nic.ip, nic.network,
2962 nic.nicparams, cluster, pnode_uuid)
2963 return None
2965 def _PrepareNicRemove(_, params, __):
2966 ip = params.ip
2967 net = params.network
2968 if net is not None and ip is not None:
2969 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2971 # Verify NIC changes (operating on copy)
2972 nics = self.instance.nics[:]
2973 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2974 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2975 if len(nics) > constants.MAX_NICS:
2976 raise errors.OpPrereqError("Instance has too many network interfaces"
2977 " (%d), cannot add more" % constants.MAX_NICS,
2978 errors.ECODE_STATE)
2980 # Pre-compute NIC changes (necessary to use result in hooks)
2981 self._nic_chgdesc = []
2982 if self.nicmod:
2983 # Operate on copies as this is still in prereq
2984 nics = [nic.Copy() for nic in self.instance.nics]
2985 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2986 self._CreateNewNic, self._ApplyNicMods, None)
2987 # Verify that NIC names are unique and valid
2988 utils.ValidateDeviceNames("NIC", nics)
2989 self._new_nics = nics
2990 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2991 else:
2992 self._new_nics = None
2993 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
2995 if not self.op.ignore_ipolicy:
2996 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2997 group_info)
2999 # Fill ispec with backend parameters
3000 ispec[constants.ISPEC_SPINDLE_USE] = \
3001 self.be_new.get(constants.BE_SPINDLE_USE, None)
3002 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3003 None)
3005 # Copy ispec to verify parameters with min/max values separately
3006 if self.op.disk_template:
3007 new_disk_template = self.op.disk_template
3009 new_disk_template = self.instance.disk_template
3010 ispec_max = ispec.copy()
3011 ispec_max[constants.ISPEC_MEM_SIZE] = \
3012 self.be_new.get(constants.BE_MAXMEM, None)
3013 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3014 new_disk_template)
3015 ispec_min = ispec.copy()
3016 ispec_min[constants.ISPEC_MEM_SIZE] = \
3017 self.be_new.get(constants.BE_MINMEM, None)
3018 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3019 new_disk_template)
3021 if (res_max or res_min):
3022 # FIXME: Improve error message by including information about whether
3023 # the upper or lower limit of the parameter fails the ipolicy.
3024 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3025 (group_info, group_info.name,
3026 utils.CommaJoin(set(res_max + res_min))))
3027 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3029 def _ConvertPlainToDrbd(self, feedback_fn):
3030 """Converts an instance from plain to drbd.
3033 feedback_fn("Converting template to drbd")
3034 pnode_uuid = self.instance.primary_node
3035 snode_uuid = self.op.remote_node_uuid
3037 assert self.instance.disk_template == constants.DT_PLAIN
3039 # create a fake disk info for _GenerateDiskTemplate
3040 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3041 constants.IDISK_VG: d.logical_id[0],
3042 constants.IDISK_NAME: d.name}
3043 for d in self.instance.disks]
3044 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3045 self.instance.uuid, pnode_uuid,
3046 [snode_uuid], disk_info, None, None, 0,
3047 feedback_fn, self.diskparams)
3048 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3049 self.diskparams)
3050 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3051 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3052 info = GetInstanceInfoText(self.instance)
3053 feedback_fn("Creating additional volumes...")
3054 # first, create the missing data and meta devices
3055 for disk in anno_disks:
3056 # unfortunately this is... not too nice
3057 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3058 info, True, p_excl_stor)
3059 for child in disk.children:
3060 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3061 s_excl_stor)
3062 # at this stage, all new LVs have been created, we can rename the
3063 # old ones
3064 feedback_fn("Renaming original volumes...")
3065 rename_list = [(o, n.children[0].logical_id)
3066 for (o, n) in zip(self.instance.disks, new_disks)]
3067 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3068 result.Raise("Failed to rename original LVs")
3070 feedback_fn("Initializing DRBD devices...")
3071 # all child devices are in place, we can now create the DRBD devices
3072 try:
3073 for disk in anno_disks:
3074 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3075 (snode_uuid, s_excl_stor)]:
3076 f_create = node_uuid == pnode_uuid
3077 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3078 f_create, excl_stor)
3079 except errors.GenericError, e:
3080 feedback_fn("Initializing of DRBD devices failed;"
3081 " renaming back original volumes...")
3082 for disk in new_disks:
3083 self.cfg.SetDiskID(disk, pnode_uuid)
3084 rename_back_list = [(n.children[0], o.logical_id)
3085 for (n, o) in zip(new_disks, self.instance.disks)]
3086 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3087 result.Raise("Failed to rename LVs back after error %s" % str(e))
3088 raise
3090 # at this point, the instance has been modified
3091 self.instance.disk_template = constants.DT_DRBD8
3092 self.instance.disks = new_disks
3093 self.cfg.Update(self.instance, feedback_fn)
3095 # Release node locks while waiting for sync
3096 ReleaseLocks(self, locking.LEVEL_NODE)
3098 # disks are created, waiting for sync
3099 disk_abort = not WaitForSync(self, self.instance,
3100 oneshot=not self.op.wait_for_sync)
3101 if disk_abort:
3102 raise errors.OpExecError("There are some degraded disks for"
3103 " this instance, please clean up manually")
3105 # Node resource locks will be released by caller
3107 def _ConvertDrbdToPlain(self, feedback_fn):
3108 """Converts an instance from drbd to plain.
3111 assert len(self.instance.secondary_nodes) == 1
3112 assert self.instance.disk_template == constants.DT_DRBD8
3114 pnode_uuid = self.instance.primary_node
3115 snode_uuid = self.instance.secondary_nodes[0]
3116 feedback_fn("Converting template to plain")
3118 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3119 new_disks = [d.children[0] for d in self.instance.disks]
3121 # copy over size, mode and name
3122 for parent, child in zip(old_disks, new_disks):
3123 child.size = parent.size
3124 child.mode = parent.mode
3125 child.name = parent.name
3127 # this is a DRBD disk, return its port to the pool
3128 # NOTE: this must be done right before the call to cfg.Update!
3129 for disk in old_disks:
3130 tcp_port = disk.logical_id[2]
3131 self.cfg.AddTcpUdpPort(tcp_port)
3133 # update instance structure
3134 self.instance.disks = new_disks
3135 self.instance.disk_template = constants.DT_PLAIN
3136 _UpdateIvNames(0, self.instance.disks)
3137 self.cfg.Update(self.instance, feedback_fn)
3139 # Release locks in case removing disks takes a while
3140 ReleaseLocks(self, locking.LEVEL_NODE)
3142 feedback_fn("Removing volumes on the secondary node...")
3143 for disk in old_disks:
3144 self.cfg.SetDiskID(disk, snode_uuid)
3145 msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
3146 if msg:
3147 self.LogWarning("Could not remove block device %s on node %s,"
3148 " continuing anyway: %s", disk.iv_name,
3149 self.cfg.GetNodeName(snode_uuid), msg)
3151 feedback_fn("Removing unneeded volumes on the primary node...")
3152 for idx, disk in enumerate(old_disks):
3153 meta = disk.children[1]
3154 self.cfg.SetDiskID(meta, pnode_uuid)
3155 msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
3156 if msg:
3157 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3158 " continuing anyway: %s", idx,
3159 self.cfg.GetNodeName(pnode_uuid), msg)
3161 def _CreateNewDisk(self, idx, params, _):
3162 """Creates a new disk.
3166 if self.instance.disk_template in constants.DTS_FILEBASED:
3167 (file_driver, file_path) = self.instance.disks[0].logical_id
3168 file_path = os.path.dirname(file_path)
3170 file_driver = file_path = None
3173 GenerateDiskTemplate(self, self.instance.disk_template,
3174 self.instance.uuid, self.instance.primary_node,
3175 self.instance.secondary_nodes, [params], file_path,
3176 file_driver, idx, self.Log, self.diskparams)[0]
3178 new_disks = CreateDisks(self, self.instance, disks=[disk])
3180 if self.cluster.prealloc_wipe_disks:
3182 WipeOrCleanupDisks(self, self.instance,
3183 disks=[(idx, disk, 0)],
3187 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3190 @staticmethod
3191 def _ModifyDisk(idx, disk, params, _):
3192 """Modifies a disk.
3194 """
3195 changes = []
3196 mode = params.get(constants.IDISK_MODE, None)
3197 if mode:
3198 disk.mode = mode
3199 changes.append(("disk.mode/%d" % idx, disk.mode))
3201 name = params.get(constants.IDISK_NAME, None)
3202 disk.name = name
3203 changes.append(("disk.name/%d" % idx, disk.name))
3205 return changes
3207 def _RemoveDisk(self, idx, root, _):
3208 """Removes a disk.
3210 """
3211 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3212 for node_uuid, disk in anno_disk.ComputeNodeTree(
3213 self.instance.primary_node):
3214 self.cfg.SetDiskID(disk, node_uuid)
3215 msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
3216 if msg:
3217 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3218 " continuing anyway", idx,
3219 self.cfg.GetNodeName(node_uuid), msg)
3221 # if this is a DRBD disk, return its port to the pool
3222 if root.dev_type in constants.LDS_DRBD:
3223 self.cfg.AddTcpUdpPort(root.logical_id[2])
3225 def _CreateNewNic(self, idx, params, private):
3226 """Creates data structure for a new network interface.
3229 mac = params[constants.INIC_MAC]
3230 ip = params.get(constants.INIC_IP, None)
3231 net = params.get(constants.INIC_NETWORK, None)
3232 name = params.get(constants.INIC_NAME, None)
3233 net_uuid = self.cfg.LookupNetwork(net)
3234 #TODO: not private.filled?? can a nic have no nicparams??
3235 nicparams = private.filled
3236 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3237 nicparams=nicparams)
3238 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3242 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3243 (mac, ip, private.filled[constants.NIC_MODE],
3244 private.filled[constants.NIC_LINK],
3248 def _ApplyNicMods(self, idx, nic, params, private):
3249 """Modifies a network interface.
3254 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3256 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3257 setattr(nic, key, params[key])
3259 new_net = params.get(constants.INIC_NETWORK, nic.network)
3260 new_net_uuid = self.cfg.LookupNetwork(new_net)
3261 if new_net_uuid != nic.network:
3262 changes.append(("nic.network/%d" % idx, new_net))
3263 nic.network = new_net_uuid
3266 nic.nicparams = private.filled
3268 for (key, val) in nic.nicparams.items():
3269 changes.append(("nic.%s/%d" % (key, idx), val))
3273 def Exec(self, feedback_fn):
3274 """Modifies an instance.
3276 All parameters take effect only at the next restart of the instance.
3279 # Process here the warnings from CheckPrereq, as we don't have a
3280 # feedback_fn there.
3281 # TODO: Replace with self.LogWarning
3282 for warn in self.warn:
3283 feedback_fn("WARNING: %s" % warn)
3285 assert ((self.op.disk_template is None) ^
3286 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3287 "Not owning any node resource locks"
3289 result = []
3292 if self.op.pnode_uuid:
3293 self.instance.primary_node = self.op.pnode_uuid
3296 if self.op.runtime_mem:
3297 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3298 self.instance,
3299 self.op.runtime_mem)
3300 rpcres.Raise("Cannot modify instance runtime memory")
3301 result.append(("runtime_memory", self.op.runtime_mem))
3303 # Apply disk changes
3304 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3305 self._CreateNewDisk, self._ModifyDisk,
3306 self._RemoveDisk)
3307 _UpdateIvNames(0, self.instance.disks)
3309 if self.op.disk_template:
3310 if __debug__:
3311 check_nodes = set(self.instance.all_nodes)
3312 if self.op.remote_node_uuid:
3313 check_nodes.add(self.op.remote_node_uuid)
3314 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3315 owned = self.owned_locks(level)
3316 assert not (check_nodes - owned), \
3317 ("Not owning the correct locks, owning %r, expected at least %r" %
3318 (owned, check_nodes))
3320 r_shut = ShutdownInstanceDisks(self, self.instance)
3321 if not r_shut:
3322 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3323 " proceed with disk template conversion")
3324 mode = (self.instance.disk_template, self.op.disk_template)
3325 try:
3326 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3327 except:
3328 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3329 raise
3330 result.append(("disk_template", self.op.disk_template))
3332 assert self.instance.disk_template == self.op.disk_template, \
3333 ("Expected disk template '%s', found '%s'" %
3334 (self.op.disk_template, self.instance.disk_template))
3336 # Release node and resource locks if there are any (they might already have
3337 # been released during disk conversion)
3338 ReleaseLocks(self, locking.LEVEL_NODE)
3339 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3342 if self._new_nics is not None:
3343 self.instance.nics = self._new_nics
3344 result.extend(self._nic_chgdesc)
3347 if self.op.hvparams:
3348 self.instance.hvparams = self.hv_inst
3349 for key, val in self.op.hvparams.iteritems():
3350 result.append(("hv/%s" % key, val))
3353 if self.op.beparams:
3354 self.instance.beparams = self.be_inst
3355 for key, val in self.op.beparams.iteritems():
3356 result.append(("be/%s" % key, val))
3360 self.instance.os = self.op.os_name
3363 if self.op.osparams:
3364 self.instance.osparams = self.os_inst
3365 for key, val in self.op.osparams.iteritems():
3366 result.append(("os/%s" % key, val))
3368 if self.op.offline is None:
3369 # Ignore
3370 pass
3371 elif self.op.offline:
3372 # Mark instance as offline
3373 self.cfg.MarkInstanceOffline(self.instance.uuid)
3374 result.append(("admin_state", constants.ADMINST_OFFLINE))
3375 else:
3376 # Mark instance as online, but stopped
3377 self.cfg.MarkInstanceDown(self.instance.uuid)
3378 result.append(("admin_state", constants.ADMINST_DOWN))
3380 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3382 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3383 self.owned_locks(locking.LEVEL_NODE)), \
3384 "All node locks should have been released by now"
3388 _DISK_CONVERSIONS = {
3389 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3390 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
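# Editor's note: Exec dispatches conversions through this table, keyed by
# (current_template, requested_template), so only the plain<->drbd pairs
# listed above are reachable, matching the check in _PreCheckDiskTemplate.
# A standalone sketch of the same dispatch pattern (toy table, not Ganeti's):
def _DemoConversionDispatch(src, dst):
  table = {("plain", "drbd"): lambda: "convert plain->drbd",
           ("drbd", "plain"): lambda: "convert drbd->plain"}
  if (src, dst) not in table:
    raise KeyError("Unsupported disk template conversion %s->%s" % (src, dst))
  return table[(src, dst)]()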
3394 class LUInstanceChangeGroup(LogicalUnit):
3395 HPATH = "instance-change-group"
3396 HTYPE = constants.HTYPE_INSTANCE
3397 REQ_BGL = False
3399 def ExpandNames(self):
3400 self.share_locks = ShareAll()
3402 self.needed_locks = {
3403 locking.LEVEL_NODEGROUP: [],
3404 locking.LEVEL_NODE: [],
3405 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3406 }
3408 self._ExpandAndLockInstance()
3410 if self.op.target_groups:
3411 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3412 self.op.target_groups)
3413 else:
3414 self.req_target_uuids = None
3416 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3418 def DeclareLocks(self, level):
3419 if level == locking.LEVEL_NODEGROUP:
3420 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3422 if self.req_target_uuids:
3423 lock_groups = set(self.req_target_uuids)
3425 # Lock all groups used by instance optimistically; this requires going
3426 # via the node before it's locked, requiring verification later on
3427 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3428 lock_groups.update(instance_groups)
3429 else:
3430 # No target groups, need to lock all of them
3431 lock_groups = locking.ALL_SET
3433 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3435 elif level == locking.LEVEL_NODE:
3436 if self.req_target_uuids:
3437 # Lock all nodes used by instances
3438 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3439 self._LockInstancesNodes()
3441 # Lock all nodes in all potential target groups
3442 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3443 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3444 member_nodes = [node_uuid
3445 for group in lock_groups
3446 for node_uuid in self.cfg.GetNodeGroup(group).members]
3447 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3448 else:
3449 # Lock all nodes as all groups are potential targets
3450 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3452 def CheckPrereq(self):
3453 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3454 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3455 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3457 assert (self.req_target_uuids is None or
3458 owned_groups.issuperset(self.req_target_uuids))
3459 assert owned_instance_names == set([self.op.instance_name])
3461 # Get instance information
3462 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3464 # Check if node groups for locked instance are still correct
3465 assert owned_nodes.issuperset(self.instance.all_nodes), \
3466 ("Instance %s's nodes changed while we kept the lock" %
3467 self.op.instance_name)
3469 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3470 owned_groups)
3472 if self.req_target_uuids:
3473 # User requested specific target groups
3474 self.target_uuids = frozenset(self.req_target_uuids)
3476 # All groups except those used by the instance are potential targets
3477 self.target_uuids = owned_groups - inst_groups
3479 conflicting_groups = self.target_uuids & inst_groups
3480 if conflicting_groups:
3481 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3482 " used by the instance '%s'" %
3483 (utils.CommaJoin(conflicting_groups),
3484 self.op.instance_name),
3485 errors.ECODE_INVAL)
3487 if not self.target_uuids:
3488 raise errors.OpPrereqError("There are no possible target groups",
3489 errors.ECODE_INVAL)
3491 def BuildHooksEnv(self):
3492 """Build hooks env.
3494 """
3495 assert self.target_uuids
3497 env = {
3498 "TARGET_GROUPS": " ".join(self.target_uuids),
3499 }
3501 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3503 return env
3505 def BuildHooksNodes(self):
3506 """Build hooks nodes.
3509 mn = self.cfg.GetMasterNode()
3512 def Exec(self, feedback_fn):
3513 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3515 assert instances == [self.op.instance_name], "Instance not locked"
3517 req = iallocator.IAReqGroupChange(instances=instances,
3518 target_groups=list(self.target_uuids))
3519 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3521 ial.Run(self.op.iallocator)
3523 if not ial.success:
3524 raise errors.OpPrereqError("Can't compute solution for changing group of"
3525 " instance '%s' using iallocator '%s': %s" %
3526 (self.op.instance_name, self.op.iallocator,
3527 ial.info), errors.ECODE_NORES)
3529 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3531 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3532 " instance '%s'", len(jobs), self.op.instance_name)
3534 return ResultWithJobs(jobs)