# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL

import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance

#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
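
# Example (a sketch, with hypothetical names): _CheckHostnameSane(lu, "inst1")
# accepts a resolved name of "inst1.example.com", since the given name is a
# prefix component of it, but raises OpPrereqError if the resolver returns an
# unrelated name such as "otherhost.example.com".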


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
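
# Example (sketch): if op.beparams contains {"vcpus": constants.VALUE_AUTO},
# _ComputeFullBeParams first replaces the "auto" value with the cluster-wide
# default and only then fills in the remaining defaults via SimpleFillBE.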


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
                                 " openvswitch", errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
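
# Example (sketch, hypothetical values): for instance_spec ==
# {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_DISK_SIZE: [1024]} the
# unspecified counts default to 0/None and the returned value is the
# (possibly empty) list of ipolicy violations for the given disk template.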


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
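
# OS variants use the "name+variant" syntax; e.g. (illustrative values only)
# "debootstrap+default" names OS "debootstrap" with variant "default", which
# must then be listed in that OS's supported_variants.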


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
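
  # The VLAN checks below accept three slot formats (see the normalization in
  # the method): ".100" for a single untagged VLAN (optionally followed by a
  # ":"-separated trunk), ":100:200" for a trunk of tagged VLANs only, and a
  # plain "100", which is rewritten to ".100".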
  def _CheckVLANArguments(self):
    """Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is"
                                           " invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility

    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
        [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)

    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
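      # Example layout (sketch, hypothetical paths): with a cluster storage
      # dir of "/srv/ganeti/file-storage", op.file_storage_dir "web" and
      # instance name "inst1", the result is
      # "/srv/ganeti/file-storage/web/inst1".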

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_NORES)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
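      # As used below, each node_lvs payload entry maps "vg/name" to a tuple
      # of LV attributes of which index 0 is the size (MiB) and index 2 the
      # "online" (in use) flag; only offline LVs may be adopted.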
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
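
    # The ispec dict built below mirrors the constants.ISPEC_* keys that the
    # instance policy (ipolicy) checks expect; sizes are in MiB, as elsewhere
    # in Ganeti.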
    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
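    # Hypervisors in HTS_REQ_PORT need a cluster-wide unique TCP port (e.g.
    # for a remote console); for all other hypervisors no port is allocated.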
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)
1716 """Check prerequisites.
1718 This checks that the instance is in the cluster.
1721 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1722 assert self.instance is not None, \
1723 "Cannot retrieve locked instance %s" % self.op.instance_name
1725 if self.instance.disk_template not in constants.DTS_COPYABLE:
1726 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1727 self.instance.disk_template,
1730 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1731 assert target_node is not None, \
1732 "Cannot retrieve locked node %s" % self.op.target_node
1734 self.target_node_uuid = target_node.uuid
1735 if target_node.uuid == self.instance.primary_node:
1736 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1737 (self.instance.name, target_node.name),
1740 bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1742 for idx, dsk in enumerate(self.instance.disks):
1743 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1744 constants.DT_SHARED_FILE):
1745 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1746 " cannot copy" % idx, errors.ECODE_STATE)
1748 CheckNodeOnline(self, target_node.uuid)
1749 CheckNodeNotDrained(self, target_node.uuid)
1750 CheckNodeVmCapable(self, target_node.uuid)
1751 cluster = self.cfg.GetClusterInfo()
1752 group_info = self.cfg.GetNodeGroup(target_node.group)
1753 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1754 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1755 ignore=self.op.ignore_ipolicy)
1757 if self.instance.admin_state == constants.ADMINST_UP:
1758 # check memory requirements on the target node
1759 CheckNodeFreeMemory(
1760 self, target_node.uuid, "moving instance %s" %
1761 self.instance.name, bep[constants.BE_MAXMEM],
1762 self.instance.hypervisor,
1763 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1764 else:
1765 self.LogInfo("Not checking memory on the target node as"
1766 " instance will not be started")
1768 # check bridge existence
1769 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1771 def Exec(self, feedback_fn):
1772 """Move an instance.
1774 The move is done by shutting it down on its present node, copying
1775 the data over (slow) and starting it on the new node.
1778 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1779 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1781 self.LogInfo("Shutting down instance %s on source node %s",
1782 self.instance.name, source_node.name)
1784 assert (self.owned_locks(locking.LEVEL_NODE) ==
1785 self.owned_locks(locking.LEVEL_NODE_RES))
1787 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1788 self.op.shutdown_timeout,
1789 self.op.reason)
1790 if self.op.ignore_consistency:
1791 result.Warn("Could not shut down instance %s on node %s. Proceeding"
1792 " anyway. Please make sure node %s is down. Error details" %
1793 (self.instance.name, source_node.name, source_node.name),
1794 feedback_fn)
1795 else:
1796 result.Raise("Could not shut down instance %s on node %s" %
1797 (self.instance.name, source_node.name))
1799 # create the target disks
1800 try:
1801 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1802 except errors.OpExecError:
1803 self.LogWarning("Device creation failed")
1804 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1807 cluster_name = self.cfg.GetClusterInfo().cluster_name
1810 # activate, get path, copy the data over
1811 for idx, disk in enumerate(self.instance.disks):
1812 self.LogInfo("Copying data for disk %d", idx)
1813 result = self.rpc.call_blockdev_assemble(
1814 target_node.uuid, (disk, self.instance), self.instance.name,
1815 True, idx)
1816 if result.fail_msg:
1817 self.LogWarning("Can't assemble newly created disk %d: %s",
1818 idx, result.fail_msg)
1819 errs.append(result.fail_msg)
1821 dev_path = result.payload
1822 result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1823 self.instance),
1824 target_node.name, dev_path,
1825 cluster_name)
1826 if result.fail_msg:
1827 self.LogWarning("Can't copy data over for disk %d: %s",
1828 idx, result.fail_msg)
1829 errs.append(result.fail_msg)
1833 self.LogWarning("Some disks failed to copy, aborting")
1835 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1837 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1838 raise errors.OpExecError("Errors during disk copy: %s" %
1841 self.instance.primary_node = target_node.uuid
1842 self.cfg.Update(self.instance, feedback_fn)
1844 self.LogInfo("Removing the disks on the original node")
1845 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1847 # Only start the instance if it's marked as up
1848 if self.instance.admin_state == constants.ADMINST_UP:
1849 self.LogInfo("Starting instance %s on node %s",
1850 self.instance.name, target_node.name)
1852 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1853 ignore_secondaries=True)
1854 if not disks_ok:
1855 ShutdownInstanceDisks(self, self.instance)
1856 raise errors.OpExecError("Can't activate the instance's disks")
1858 result = self.rpc.call_instance_start(target_node.uuid,
1859 (self.instance, None, None), False,
1860 self.op.reason)
1861 msg = result.fail_msg
1862 if msg:
1863 ShutdownInstanceDisks(self, self.instance)
1864 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1865 (self.instance.name, target_node.name, msg))
1868 class LUInstanceMultiAlloc(NoHooksLU):
1869 """Allocates multiple instances at the same time.
1874 def CheckArguments(self):
1879 for inst in self.op.instances:
1880 if inst.iallocator is not None:
1881 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1882 " instance objects", errors.ECODE_INVAL)
1883 nodes.append(bool(inst.pnode))
1884 if inst.disk_template in constants.DTS_INT_MIRROR:
1885 nodes.append(bool(inst.snode))
1887 has_nodes = compat.any(nodes)
1888 if compat.all(nodes) ^ has_nodes:
1889 raise errors.OpPrereqError("There are instance objects providing"
1890 " pnode/snode while others do not",
1893 if not has_nodes and self.op.iallocator is None:
1894 default_iallocator = self.cfg.GetDefaultIAllocator()
1895 if default_iallocator:
1896 self.op.iallocator = default_iallocator
1898 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1899 " given and no cluster-wide default"
1900 " iallocator found; please specify either"
1901 " an iallocator or nodes on the instances"
1902 " or set a cluster-wide default iallocator",
1905 _CheckOpportunisticLocking(self.op)
1907 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1909 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1910 utils.CommaJoin(dups), errors.ECODE_INVAL)
1912 def ExpandNames(self):
1913 """Calculate the locks.
1916 self.share_locks = ShareAll()
1917 self.needed_locks = {
1918 # iallocator will select nodes and even if no iallocator is used,
1919 # collisions with LUInstanceCreate should be avoided
1920 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1923 if self.op.iallocator:
1924 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1925 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1927 if self.op.opportunistic_locking:
1928 self.opportunistic_locks[locking.LEVEL_NODE] = True
1929 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1932 for inst in self.op.instances:
1933 (inst.pnode_uuid, inst.pnode) = \
1934 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1935 nodeslist.append(inst.pnode_uuid)
1936 if inst.snode is not None:
1937 (inst.snode_uuid, inst.snode) = \
1938 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1939 nodeslist.append(inst.snode_uuid)
1941 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1942 # Lock resources of instance's primary and secondary nodes (copy to
1943 # prevent accidental modification)
1944 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1946 def CheckPrereq(self):
1947 """Check prerequisite.
1950 if self.op.iallocator:
1951 cluster = self.cfg.GetClusterInfo()
1952 default_vg = self.cfg.GetVGName()
1953 ec_id = self.proc.GetECId()
1955 if self.op.opportunistic_locking:
1956 # Only consider nodes for which a lock is held
1957 node_whitelist = self.cfg.GetNodeNames(
1958 list(self.owned_locks(locking.LEVEL_NODE)))
1959 else:
1960 node_whitelist = None
1962 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1963 _ComputeNics(op, cluster, None,
1965 _ComputeFullBeParams(op, cluster),
1967 for op in self.op.instances]
1969 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1970 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1972 ial.Run(self.op.iallocator)
1975 raise errors.OpPrereqError("Can't compute nodes using"
1976 " iallocator '%s': %s" %
1977 (self.op.iallocator, ial.info),
1980 self.ia_result = ial.result
1983 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1984 constants.JOB_IDS_KEY: [],
1987 def _ConstructPartialResult(self):
1988 """Constructs the partial result.
1991 if self.op.iallocator:
1992 (allocatable, failed_insts) = self.ia_result
1993 allocatable_insts = map(compat.fst, allocatable)
1994 else:
1995 allocatable_insts = [op.instance_name for op in self.op.instances]
1999 constants.ALLOCATABLE_KEY: allocatable_insts,
2000 constants.FAILED_KEY: failed_insts,
2003 def Exec(self, feedback_fn):
2004 """Executes the opcode.
2008 if self.op.iallocator:
2009 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2010 (allocatable, failed) = self.ia_result
2012 for (name, node_names) in allocatable:
2013 op = op2inst.pop(name)
2015 (op.pnode_uuid, op.pnode) = \
2016 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2017 if len(node_names) > 1:
2018 (op.snode_uuid, op.snode) = \
2019 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2023 missing = set(op2inst.keys()) - set(failed)
2024 assert not missing, \
2025 "Iallocator returned an incomplete result: %s" % \
2026 utils.CommaJoin(missing)
2028 jobs.extend([op] for op in self.op.instances)
2030 return ResultWithJobs(jobs, **self._ConstructPartialResult())
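# Editorial sketch (not from the original source; instance names hypothetical):
# the opcode's result combines the submitted jobs with the partial result from
# _ConstructPartialResult, e.g.
#   {constants.ALLOCATABLE_KEY: ["inst1.example.com"],
#    constants.FAILED_KEY: []}
# plus the job IDs; dry runs report an empty job list (see dry_run_result
# above).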
2033 class _InstNicModPrivate:
2034 """Data structure for network interface modifications.
2036 Used by L{LUInstanceSetParams}.
2044 def _PrepareContainerMods(mods, private_fn):
2045 """Prepares a list of container modifications by adding a private data field.
2047 @type mods: list of tuples; (operation, index, parameters)
2048 @param mods: List of modifications
2049 @type private_fn: callable or None
2050 @param private_fn: Callable for constructing a private data field for a
2055 if private_fn is None:
2060 return [(op, idx, params, fn()) for (op, idx, params) in mods]
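# Editorial sketch (not from the original source): _PrepareContainerMods turns
# three-element modification tuples into four-element ones, e.g.
#   [(constants.DDM_ADD, -1, {"size": 1024})]
#     -> [(constants.DDM_ADD, -1, {"size": 1024}, None)]
# where the fourth slot holds a fresh private_fn() object when private_fn is
# given (the NIC code below passes _InstNicModPrivate).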
2063 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2064 """Checks if nodes have enough physical CPUs
2066 This function checks if all given nodes have the needed number of
2067 physical CPUs. In case any node has fewer CPUs or we cannot get the
2068 information from the node, this function raises an OpPrereqError
2071 @type lu: C{LogicalUnit}
2072 @param lu: a logical unit from which we get configuration data
2073 @type node_uuids: C{list}
2074 @param node_uuids: the list of node UUIDs to check
2075 @type requested: C{int}
2076 @param requested: the minimum acceptable number of physical CPUs
2077 @type hypervisor_specs: list of pairs (string, dict of strings)
2078 @param hypervisor_specs: list of hypervisor specifications in
2079 pairs (hypervisor_name, hvparams)
2080 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2081 or we cannot check the node
2084 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2085 for node_uuid in node_uuids:
2086 info = nodeinfo[node_uuid]
2087 node_name = lu.cfg.GetNodeName(node_uuid)
2088 info.Raise("Cannot get current information from node %s" % node_name,
2089 prereq=True, ecode=errors.ECODE_ENVIRON)
2090 (_, _, (hv_info, )) = info.payload
2091 num_cpus = hv_info.get("cpu_total", None)
2092 if not isinstance(num_cpus, int):
2093 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2094 " on node %s, result was '%s'" %
2095 (node_name, num_cpus), errors.ECODE_ENVIRON)
2096 if requested > num_cpus:
2097 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2098 "required" % (node_name, num_cpus, requested),
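# Editorial usage sketch (hypervisor name illustrative only):
#   hvspecs = [(constants.HT_XEN_PVM, cluster.hvparams[constants.HT_XEN_PVM])]
#   _CheckNodesPhysicalCPUs(self, node_uuids, 4, hvspecs)
# raises OpPrereqError as soon as any node reports fewer than four CPUs in the
# "cpu_total" field of its node info payload.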
2102 def GetItemFromContainer(identifier, kind, container):
2103 """Return the item referred to by the identifier.
2105 @type identifier: string
2106 @param identifier: Item index or name or UUID
2108 @param kind: One-word item description
2109 @type container: list
2110 @param container: Container to get the item from
2115 idx = int(identifier)
2118 absidx = len(container) - 1
2120 raise IndexError("Not accepting negative indices other than -1")
2121 elif idx > len(container):
2122 raise IndexError("Got %s index %s, but there are only %s" %
2123 (kind, idx, len(container)))
2124 else:
2125 absidx = idx
2126 return (absidx, container[idx])
2130 for idx, item in enumerate(container):
2131 if item.uuid == identifier or item.name == identifier:
2132 return (idx, item)
2134 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2135 (kind, identifier), errors.ECODE_NOENT)
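# Editorial usage sketch: with container = instance.disks holding two disks,
#   GetItemFromContainer("1", "disk", container)  -> (1, <second disk>)
#   GetItemFromContainer("-1", "disk", container) -> (1, <second disk>)
# while a string that is not an integer is looked up against item.uuid and
# item.name, raising OpPrereqError (ECODE_NOENT) when nothing matches.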
2138 def _ApplyContainerMods(kind, container, chgdesc, mods,
2139 create_fn, modify_fn, remove_fn,
2141 """Applies descriptions in C{mods} to C{container}.
2144 @param kind: One-word item description
2145 @type container: list
2146 @param container: Container to modify
2147 @type chgdesc: None or list
2148 @param chgdesc: List of applied changes
2150 @param mods: Modifications as returned by L{_PrepareContainerMods}
2151 @type create_fn: callable
2152 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2153 receives absolute item index, parameters and private data object as added
2154 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2156 @type modify_fn: callable
2157 @param modify_fn: Callback for modifying an existing item
2158 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2159 and private data object as added by L{_PrepareContainerMods}, returns
2161 @type remove_fn: callable
2162 @param remove_fn: Callback on removing item; receives absolute item index,
2163 item and private data object as added by L{_PrepareContainerMods}
2164 @type post_add_fn: callable
2165 @param post_add_fn: Callable for post-processing a newly created item after
2166 it has been put into the container. It receives the index of the new item
2167 and the new item as parameters.
2170 for (op, identifier, params, private) in mods:
2171 changes = None
2173 if op == constants.DDM_ADD:
2174 # Calculate where item will be added
2175 # When adding an item, identifier can only be an index
2176 try:
2177 idx = int(identifier)
2178 except ValueError:
2179 raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
2180 " as an identifier for %s" % constants.DDM_ADD,
2181 errors.ECODE_INVAL)
2182 if idx == -1:
2183 addidx = len(container)
2186 raise IndexError("Not accepting negative indices other than -1")
2187 elif idx > len(container):
2188 raise IndexError("Got %s index %s, but there are only %s" %
2189 (kind, idx, len(container)))
2192 if create_fn is None:
2195 (item, changes) = create_fn(addidx, params, private)
2198 container.append(item)
2201 assert idx <= len(container)
2202 # list.insert does so before the specified index
2203 container.insert(idx, item)
2205 if post_add_fn is not None:
2206 post_add_fn(addidx, item)
2209 # Retrieve existing item
2210 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2212 if op == constants.DDM_REMOVE:
2215 if remove_fn is not None:
2216 remove_fn(absidx, item, private)
2218 changes = [("%s/%s" % (kind, absidx), "remove")]
2220 assert container[absidx] == item
2221 del container[absidx]
2222 elif op == constants.DDM_MODIFY:
2223 if modify_fn is not None:
2224 changes = modify_fn(absidx, item, params, private)
2226 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2228 assert _TApplyContModsCbChanges(changes)
2230 if not (chgdesc is None or changes is None):
2231 chgdesc.extend(changes)
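# Editorial usage sketch, mirroring the NIC handling in LUInstanceSetParams:
#   chgdesc = []
#   mods = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
#   _ApplyContainerMods("NIC", nics, chgdesc, mods,
#                       create_fn, modify_fn, remove_fn)
# afterwards chgdesc holds (path, change) pairs such as ("nic/1", "remove"),
# ready to be fed back to the user and into the hooks environment.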
2234 def _UpdateIvNames(base_index, disks):
2235 """Updates the C{iv_name} attribute of disks.
2237 @type disks: list of L{objects.Disk}
2240 for (idx, disk) in enumerate(disks):
2241 disk.iv_name = "disk/%s" % (base_index + idx, )
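# Example: _UpdateIvNames(0, instance.disks) relabels the disks "disk/0",
# "disk/1", ... in list order; a non-zero base_index would shift the numbering
# accordingly.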
2244 class LUInstanceSetParams(LogicalUnit):
2245 """Modifies an instance's parameters.
2248 HPATH = "instance-modify"
2249 HTYPE = constants.HTYPE_INSTANCE
2253 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2254 assert ht.TList(mods)
2255 assert not mods or len(mods[0]) in (2, 3)
2257 if mods and len(mods[0]) == 2:
2261 for op, params in mods:
2262 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2263 result.append((op, -1, params))
2267 raise errors.OpPrereqError("Only one %s add or remove operation is"
2268 " supported at a time" % kind,
2271 result.append((constants.DDM_MODIFY, op, params))
2273 assert verify_fn(result)
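# Editorial sketch of the legacy upgrade performed above:
#   [(constants.DDM_ADD, {...})] -> [(constants.DDM_ADD, -1, {...})]
#   [("2", {...})]               -> [(constants.DDM_MODIFY, "2", {...})]
# i.e. two-element tuples gain an identifier so downstream code only ever sees
# (op, identifier, params) triples.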
2280 def _CheckMods(kind, mods, key_types, item_fn):
2281 """Ensures requested disk/NIC modifications are valid.
2284 for (op, _, params) in mods:
2285 assert ht.TDict(params)
2287 # If 'key_types' is an empty dict, we assume we have an
2288 # 'ext' template and thus do not ForceDictType
2289 if key_types:
2290 utils.ForceDictType(params, key_types)
2292 if op == constants.DDM_REMOVE:
2293 if params:
2294 raise errors.OpPrereqError("No settings should be passed when"
2295 " removing a %s" % kind,
2297 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2298 item_fn(op, params)
2299 else:
2300 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2303 def _VerifyDiskModification(op, params, excl_stor):
2304 """Verifies a disk modification.
2307 if op == constants.DDM_ADD:
2308 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2309 if mode not in constants.DISK_ACCESS_SET:
2310 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2313 size = params.get(constants.IDISK_SIZE, None)
2314 if size is None:
2315 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2316 constants.IDISK_SIZE, errors.ECODE_INVAL)
2319 params[constants.IDISK_SIZE] = size
2320 name = params.get(constants.IDISK_NAME, None)
2321 if name is not None and name.lower() == constants.VALUE_NONE:
2322 params[constants.IDISK_NAME] = None
2324 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2326 elif op == constants.DDM_MODIFY:
2327 if constants.IDISK_SIZE in params:
2328 raise errors.OpPrereqError("Disk size change not possible, use"
2329 " grow-disk", errors.ECODE_INVAL)
2331 raise errors.OpPrereqError("Disk modification doesn't support"
2332 " additional arbitrary parameters",
2334 name = params.get(constants.IDISK_NAME, None)
2335 if name is not None and name.lower() == constants.VALUE_NONE:
2336 params[constants.IDISK_NAME] = None
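# Example of a disk modification accepted by the checks above (size in MiB):
#   (constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024,
#                            constants.IDISK_MODE: constants.DISK_RDWR})
# whereas a DDM_MODIFY entry carrying IDISK_SIZE is rejected with a pointer to
# grow-disk.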
2339 def _VerifyNicModification(op, params):
2340 """Verifies a network interface modification.
2343 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2344 ip = params.get(constants.INIC_IP, None)
2345 name = params.get(constants.INIC_NAME, None)
2346 req_net = params.get(constants.INIC_NETWORK, None)
2347 link = params.get(constants.NIC_LINK, None)
2348 mode = params.get(constants.NIC_MODE, None)
2349 if name is not None and name.lower() == constants.VALUE_NONE:
2350 params[constants.INIC_NAME] = None
2351 if req_net is not None:
2352 if req_net.lower() == constants.VALUE_NONE:
2353 params[constants.INIC_NETWORK] = None
2355 elif link is not None or mode is not None:
2356 raise errors.OpPrereqError("If network is given, mode or link"
2357 " should not be set",
2360 if op == constants.DDM_ADD:
2361 macaddr = params.get(constants.INIC_MAC, None)
2362 if macaddr is None:
2363 params[constants.INIC_MAC] = constants.VALUE_AUTO
2366 if ip.lower() == constants.VALUE_NONE:
2367 params[constants.INIC_IP] = None
2368 else:
2369 if ip.lower() == constants.NIC_IP_POOL:
2370 if op == constants.DDM_ADD and req_net is None:
2371 raise errors.OpPrereqError("If ip=pool, parameter network"
2375 if not netutils.IPAddress.IsValid(ip):
2376 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2379 if constants.INIC_MAC in params:
2380 macaddr = params[constants.INIC_MAC]
2381 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2382 macaddr = utils.NormalizeAndValidateMac(macaddr)
2384 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2385 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2386 " modifying an existing NIC",
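# Example of a NIC addition these checks accept (network name hypothetical):
#   (constants.DDM_ADD, -1, {constants.INIC_IP: constants.NIC_IP_POOL,
#                            constants.INIC_NETWORK: "mynet"})
# ip=pool requires a network, a missing MAC defaults to constants.VALUE_AUTO,
# and "none" values for ip/name are normalized to None.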
2389 def CheckArguments(self):
2390 if not (self.op.nics or self.op.disks or self.op.disk_template or
2391 self.op.hvparams or self.op.beparams or self.op.os_name or
2392 self.op.osparams or self.op.offline is not None or
2393 self.op.runtime_mem or self.op.pnode):
2394 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2396 if self.op.hvparams:
2397 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2398 "hypervisor", "instance", "cluster")
2400 self.op.disks = self._UpgradeDiskNicMods(
2401 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2402 self.op.nics = self._UpgradeDiskNicMods(
2403 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2405 if self.op.disks and self.op.disk_template is not None:
2406 raise errors.OpPrereqError("Disk template conversion and other disk"
2407 " changes not supported at the same time",
2410 if (self.op.disk_template and
2411 self.op.disk_template in constants.DTS_INT_MIRROR and
2412 self.op.remote_node is None):
2413 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2414 " one requires specifying a secondary node",
2417 # Check NIC modifications
2418 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2419 self._VerifyNicModification)
2422 (self.op.pnode_uuid, self.op.pnode) = \
2423 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2425 def ExpandNames(self):
2426 self._ExpandAndLockInstance()
2427 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2428 # Can't even acquire node locks in shared mode as upcoming changes in
2429 # Ganeti 2.6 will start to modify the node object on disk conversion
2430 self.needed_locks[locking.LEVEL_NODE] = []
2431 self.needed_locks[locking.LEVEL_NODE_RES] = []
2432 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2433 # Lock the node group to look up the ipolicy
2434 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2436 def DeclareLocks(self, level):
2437 if level == locking.LEVEL_NODEGROUP:
2438 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2439 # Acquire locks for the instance's nodegroups optimistically. Needs
2440 # to be verified in CheckPrereq
2441 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2442 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2443 elif level == locking.LEVEL_NODE:
2444 self._LockInstancesNodes()
2445 if self.op.disk_template and self.op.remote_node:
2446 (self.op.remote_node_uuid, self.op.remote_node) = \
2447 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2448 self.op.remote_node)
2449 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2450 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2452 self.needed_locks[locking.LEVEL_NODE_RES] = \
2453 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2455 def BuildHooksEnv(self):
2458 This runs on the master, primary and secondaries.
2462 if constants.BE_MINMEM in self.be_new:
2463 args["minmem"] = self.be_new[constants.BE_MINMEM]
2464 if constants.BE_MAXMEM in self.be_new:
2465 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2466 if constants.BE_VCPUS in self.be_new:
2467 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2468 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2469 # information at all.
2471 if self._new_nics is not None:
2474 for nic in self._new_nics:
2475 n = copy.deepcopy(nic)
2476 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2477 n.nicparams = nicparams
2478 nics.append(NICToTuple(self, n))
2482 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2483 if self.op.disk_template:
2484 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2485 if self.op.runtime_mem:
2486 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2490 def BuildHooksNodes(self):
2491 """Build hooks nodes.
2494 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2497 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2498 old_params, cluster, pnode_uuid):
2500 update_params_dict = dict([(key, params[key])
2501 for key in constants.NICS_PARAMETERS
2504 req_link = update_params_dict.get(constants.NIC_LINK, None)
2505 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2508 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2509 if new_net_uuid_or_name:
2510 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2511 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2514 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2517 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2518 if netparams is None:
2519 raise errors.OpPrereqError("No netparams found for the network"
2520 " %s, probably not connected" %
2521 new_net_obj.name, errors.ECODE_INVAL)
2522 new_params = dict(netparams)
2524 new_params = GetUpdatedParams(old_params, update_params_dict)
2526 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2528 new_filled_params = cluster.SimpleFillNIC(new_params)
2529 objects.NIC.CheckParameterSyntax(new_filled_params)
2531 new_mode = new_filled_params[constants.NIC_MODE]
2532 if new_mode == constants.NIC_MODE_BRIDGED:
2533 bridge = new_filled_params[constants.NIC_LINK]
2534 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2535 if msg:
2536 msg = "Error checking bridges on node '%s': %s" % \
2537 (self.cfg.GetNodeName(pnode_uuid), msg)
2538 if self.op.force:
2539 self.warn.append(msg)
2540 else:
2541 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2543 elif new_mode == constants.NIC_MODE_ROUTED:
2544 ip = params.get(constants.INIC_IP, old_ip)
2545 if ip is None:
2546 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2547 " on a routed NIC", errors.ECODE_INVAL)
2549 elif new_mode == constants.NIC_MODE_OVS:
2550 # TODO: check OVS link
2551 self.LogInfo("OVS links are currently not checked for correctness")
2553 if constants.INIC_MAC in params:
2554 mac = params[constants.INIC_MAC]
2555 if mac is None:
2556 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2558 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2559 # otherwise generate the MAC address
2560 params[constants.INIC_MAC] = \
2561 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2563 # or validate/reserve the current one
2564 try:
2565 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2566 except errors.ReservationError:
2567 raise errors.OpPrereqError("MAC address '%s' already in use"
2568 " in cluster" % mac,
2569 errors.ECODE_NOTUNIQUE)
2570 elif new_net_uuid != old_net_uuid:
2572 def get_net_prefix(net_uuid):
2575 nobj = self.cfg.GetNetwork(net_uuid)
2576 mac_prefix = nobj.mac_prefix
2580 new_prefix = get_net_prefix(new_net_uuid)
2581 old_prefix = get_net_prefix(old_net_uuid)
2582 if old_prefix != new_prefix:
2583 params[constants.INIC_MAC] = \
2584 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2586 # if there is a change in (ip, network) tuple
2587 new_ip = params.get(constants.INIC_IP, old_ip)
2588 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2590 # if IP is pool then require a network and generate one IP
2591 if new_ip.lower() == constants.NIC_IP_POOL:
2592 if new_net_uuid:
2593 try:
2594 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2595 except errors.ReservationError:
2596 raise errors.OpPrereqError("Unable to get a free IP"
2597 " from the address pool",
2599 self.LogInfo("Chose IP %s from network %s",
2602 params[constants.INIC_IP] = new_ip
2603 else:
2604 raise errors.OpPrereqError("ip=pool, but no network found",
2605 errors.ECODE_INVAL)
2606 # Reserve the new IP in the new network, if any
2607 elif new_net_uuid:
2608 try:
2609 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2610 self.LogInfo("Reserving IP %s in network %s",
2611 new_ip, new_net_obj.name)
2612 except errors.ReservationError:
2613 raise errors.OpPrereqError("IP %s not available in network %s" %
2614 (new_ip, new_net_obj.name),
2615 errors.ECODE_NOTUNIQUE)
2616 # new network is None so check if new IP is a conflicting IP
2617 elif self.op.conflicts_check:
2618 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2620 # release old IP if old network is not None
2621 if old_ip and old_net_uuid:
2622 try:
2623 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2624 except errors.AddressPoolError:
2625 logging.warning("Cannot release IP %s: not contained in network %s",
2626 old_ip, old_net_obj.name)
2628 # there are no changes in (ip, network) tuple and old network is not None
2629 elif (old_net_uuid is not None and
2630 (req_link is not None or req_mode is not None)):
2631 raise errors.OpPrereqError("Not allowed to change link or mode of"
2632 " a NIC that is connected to a network",
2635 private.params = new_params
2636 private.filled = new_filled_params
2638 def _PreCheckDiskTemplate(self, pnode_info):
2639 """CheckPrereq checks related to a new disk template."""
2640 # Arguments are passed to avoid configuration lookups
2641 pnode_uuid = self.instance.primary_node
2642 if self.instance.disk_template == self.op.disk_template:
2643 raise errors.OpPrereqError("Instance already has disk template %s" %
2644 self.instance.disk_template,
2647 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2648 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2649 " cluster." % self.op.disk_template)
2651 if (self.instance.disk_template,
2652 self.op.disk_template) not in self._DISK_CONVERSIONS:
2653 raise errors.OpPrereqError("Unsupported disk template conversion from"
2654 " %s to %s" % (self.instance.disk_template,
2655 self.op.disk_template),
2657 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2658 msg="cannot change disk template")
2659 if self.op.disk_template in constants.DTS_INT_MIRROR:
2660 if self.op.remote_node_uuid == pnode_uuid:
2661 raise errors.OpPrereqError("Given new secondary node %s is the same"
2662 " as the primary node of the instance" %
2663 self.op.remote_node, errors.ECODE_STATE)
2664 CheckNodeOnline(self, self.op.remote_node_uuid)
2665 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2666 # FIXME: here we assume that the old instance type is DT_PLAIN
2667 assert self.instance.disk_template == constants.DT_PLAIN
2668 disks = [{constants.IDISK_SIZE: d.size,
2669 constants.IDISK_VG: d.logical_id[0]}
2670 for d in self.instance.disks]
2671 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2672 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2674 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2675 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2676 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2678 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2679 ignore=self.op.ignore_ipolicy)
2680 if pnode_info.group != snode_info.group:
2681 self.LogWarning("The primary and secondary nodes are in two"
2682 " different node groups; the disk parameters"
2683 " from the first disk's node group will be"
2686 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2687 # Make sure none of the nodes require exclusive storage
2688 nodes = [pnode_info]
2689 if self.op.disk_template in constants.DTS_INT_MIRROR:
2691 nodes.append(snode_info)
2692 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2693 if compat.any(map(has_es, nodes)):
2694 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2695 " storage is enabled" % (self.instance.disk_template,
2696 self.op.disk_template))
2697 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2699 def _PreCheckDisks(self, ispec):
2700 """CheckPrereq checks related to disk changes.
2703 @param ispec: instance specs to be updated with the new disks
2706 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2708 excl_stor = compat.any(
2709 rpc.GetExclusiveStorageForNodes(self.cfg,
2710 self.instance.all_nodes).values()
2713 # Check disk modifications. This is done here and not in CheckArguments
2714 # (as with NICs), because we need to know the instance's disk template
2715 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2716 if self.instance.disk_template == constants.DT_EXT:
2717 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2719 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2722 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2724 # Check the validity of the 'provider' parameter
2725 if self.instance.disk_template == constants.DT_EXT:
2726 for mod in self.diskmod:
2727 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2728 if mod[0] == constants.DDM_ADD:
2729 if ext_provider is None:
2730 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2731 " '%s' missing, during disk add" %
2733 constants.IDISK_PROVIDER),
2735 elif mod[0] == constants.DDM_MODIFY:
2736 if ext_provider is not None:
2737 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2739 constants.IDISK_PROVIDER,
2742 for mod in self.diskmod:
2743 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2744 if ext_provider is not None:
2745 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2746 " instances of type '%s'" %
2747 (constants.IDISK_PROVIDER,
2751 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2752 raise errors.OpPrereqError("Disk operations not supported for"
2753 " diskless instances", errors.ECODE_INVAL)
2755 def _PrepareDiskMod(_, disk, params, __):
2756 disk.name = params.get(constants.IDISK_NAME, None)
2758 # Verify disk changes (operating on a copy)
2759 disks = copy.deepcopy(self.instance.disks)
2760 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2761 _PrepareDiskMod, None)
2762 utils.ValidateDeviceNames("disk", disks)
2763 if len(disks) > constants.MAX_DISKS:
2764 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2765 " more" % constants.MAX_DISKS,
2767 disk_sizes = [disk.size for disk in self.instance.disks]
2768 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2769 self.diskmod if op == constants.DDM_ADD)
2770 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2771 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2773 if self.op.offline is not None and self.op.offline:
2774 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2775 msg="can't change to offline")
2777 def CheckPrereq(self):
2778 """Check prerequisites.
2780 This checks the instance and validates the requested parameter changes.
2783 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2784 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2785 self.cluster = self.cfg.GetClusterInfo()
2787 assert self.instance is not None, \
2788 "Cannot retrieve locked instance %s" % self.op.instance_name
2790 pnode_uuid = self.instance.primary_node
2794 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2796 # verify that the instance is not up
2797 instance_info = self.rpc.call_instance_info(
2798 pnode_uuid, self.instance.name, self.instance.hypervisor,
2799 self.instance.hvparams)
2800 if instance_info.fail_msg:
2801 self.warn.append("Can't get instance runtime information: %s" %
2802 instance_info.fail_msg)
2803 elif instance_info.payload:
2804 raise errors.OpPrereqError("Instance is still running on %s" %
2805 self.cfg.GetNodeName(pnode_uuid),
2808 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2809 node_uuids = list(self.instance.all_nodes)
2810 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2812 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2813 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2814 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2816 # dictionary with instance information after the modification
2819 # Prepare NIC modifications
2820 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2823 if self.op.os_name and not self.op.force:
2824 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2825 self.op.force_variant)
2826 instance_os = self.op.os_name
2828 instance_os = self.instance.os
2830 assert not (self.op.disk_template and self.op.disks), \
2831 "Can't modify disk template and apply disk changes at the same time"
2833 if self.op.disk_template:
2834 self._PreCheckDiskTemplate(pnode_info)
2836 self._PreCheckDisks(ispec)
2838 # hvparams processing
2839 if self.op.hvparams:
2840 hv_type = self.instance.hypervisor
2841 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2842 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2843 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2846 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2847 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2848 self.hv_proposed = self.hv_new = hv_new # the new actual values
2849 self.hv_inst = i_hvdict # the new dict (without defaults)
2851 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2853 self.instance.hvparams)
2854 self.hv_new = self.hv_inst = {}
2856 # beparams processing
2857 if self.op.beparams:
2858 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2860 objects.UpgradeBeParams(i_bedict)
2861 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2862 be_new = self.cluster.SimpleFillBE(i_bedict)
2863 self.be_proposed = self.be_new = be_new # the new actual values
2864 self.be_inst = i_bedict # the new dict (without defaults)
2866 self.be_new = self.be_inst = {}
2867 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2868 be_old = self.cluster.FillBE(self.instance)
2870 # CPU param validation -- checking every time a parameter is
2871 # changed to cover all cases where either CPU mask or vcpus have
2873 if (constants.BE_VCPUS in self.be_proposed and
2874 constants.HV_CPU_MASK in self.hv_proposed):
2875 cpu_list = \
2876 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2877 # Verify mask is consistent with number of vCPUs. Can skip this
2878 # test if only 1 entry in the CPU mask, which means same mask
2879 # is applied to all vCPUs.
2880 if (len(cpu_list) > 1 and
2881 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2882 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2884 (self.be_proposed[constants.BE_VCPUS],
2885 self.hv_proposed[constants.HV_CPU_MASK]),
2888 # Only perform this test if a new CPU mask is given
2889 if constants.HV_CPU_MASK in self.hv_new:
2890 # Calculate the largest CPU number requested
2891 max_requested_cpu = max(map(max, cpu_list))
2892 # Check that all of the instance's nodes have enough physical CPUs to
2893 # satisfy the requested CPU mask
2894 hvspecs = [(self.instance.hypervisor,
2895 self.cfg.GetClusterInfo()
2896 .hvparams[self.instance.hypervisor])]
2897 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2898 max_requested_cpu + 1,
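# Illustrative example (editorial, mask syntax per utils.ParseMultiCpuMask):
# a CPU mask of "0-1:3" parses to [[0, 1], [3]], so BE_VCPUS must equal 2 and
# max_requested_cpu is 3, i.e. every node hosting the instance needs at least
# four physical CPUs.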
2901 # osparams processing
2902 if self.op.osparams:
2903 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2904 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2905 self.os_inst = i_osdict # the new dict (without defaults)
2909 #TODO(dynmem): do the appropriate check involving MINMEM
2910 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2911 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2912 mem_check_list = [pnode_uuid]
2913 if be_new[constants.BE_AUTO_BALANCE]:
2914 # either we changed auto_balance to yes or it was from before
2915 mem_check_list.extend(self.instance.secondary_nodes)
2916 instance_info = self.rpc.call_instance_info(
2917 pnode_uuid, self.instance.name, self.instance.hypervisor,
2918 self.instance.hvparams)
2919 hvspecs = [(self.instance.hypervisor,
2920 self.cluster.hvparams[self.instance.hypervisor])]
2921 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2922 hvspecs)
2923 pninfo = nodeinfo[pnode_uuid]
2924 msg = pninfo.fail_msg
2925 if msg:
2926 # Assume the primary node is unreachable and go ahead
2927 self.warn.append("Can't get info from primary node %s: %s" %
2928 (self.cfg.GetNodeName(pnode_uuid), msg))
2929 else:
2930 (_, _, (pnhvinfo, )) = pninfo.payload
2931 if not isinstance(pnhvinfo.get("memory_free", None), int):
2932 self.warn.append("Node data from primary node %s doesn't contain"
2933 " free memory information" %
2934 self.cfg.GetNodeName(pnode_uuid))
2935 elif instance_info.fail_msg:
2936 self.warn.append("Can't get instance runtime information: %s" %
2937 instance_info.fail_msg)
2938 else:
2939 if instance_info.payload:
2940 current_mem = int(instance_info.payload["memory"])
2941 else:
2942 # Assume instance not running
2943 # (there is a slight race condition here, but it's not very
2944 # probable, and we have no other way to check)
2945 # TODO: Describe race condition
2946 current_mem = 0
2947 #TODO(dynmem): do the appropriate check involving MINMEM
2948 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2949 pnhvinfo["memory_free"])
2950 if miss_mem > 0:
2951 raise errors.OpPrereqError("This change will prevent the instance"
2952 " from starting, due to %d MB of memory"
2953 " missing on its primary node" %
2954 miss_mem, errors.ECODE_NORES)
2956 if be_new[constants.BE_AUTO_BALANCE]:
2957 for node_uuid, nres in nodeinfo.items():
2958 if node_uuid not in self.instance.secondary_nodes:
2959 continue
2960 nres.Raise("Can't get info from secondary node %s" %
2961 self.cfg.GetNodeName(node_uuid), prereq=True,
2962 ecode=errors.ECODE_STATE)
2963 (_, _, (nhvinfo, )) = nres.payload
2964 if not isinstance(nhvinfo.get("memory_free", None), int):
2965 raise errors.OpPrereqError("Secondary node %s didn't return free"
2966 " memory information" %
2967 self.cfg.GetNodeName(node_uuid),
2969 #TODO(dynmem): do the appropriate check involving MINMEM
2970 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2971 raise errors.OpPrereqError("This change will prevent the instance"
2972 " from failover to its secondary node"
2973 " %s, due to not enough memory" %
2974 self.cfg.GetNodeName(node_uuid),
2977 if self.op.runtime_mem:
2978 remote_info = self.rpc.call_instance_info(
2979 self.instance.primary_node, self.instance.name,
2980 self.instance.hypervisor,
2981 self.cluster.hvparams[self.instance.hypervisor])
2982 remote_info.Raise("Error checking node %s" %
2983 self.cfg.GetNodeName(self.instance.primary_node))
2984 if not remote_info.payload: # not running already
2985 raise errors.OpPrereqError("Instance %s is not running" %
2986 self.instance.name, errors.ECODE_STATE)
2988 current_memory = remote_info.payload["memory"]
2989 if (not self.op.force and
2990 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2991 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2992 raise errors.OpPrereqError("Instance %s must have between %d"
2993 " and %d MB of memory unless --force is"
2994 " given" %
2995 (self.instance.name,
2996 self.be_proposed[constants.BE_MINMEM],
2997 self.be_proposed[constants.BE_MAXMEM]),
3000 delta = self.op.runtime_mem - current_memory
3001 if delta > 0:
3002 CheckNodeFreeMemory(
3003 self, self.instance.primary_node,
3004 "ballooning memory for instance %s" % self.instance.name, delta,
3005 self.instance.hypervisor,
3006 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3008 # make self.cluster visible in the functions below
3009 cluster = self.cluster
3011 def _PrepareNicCreate(_, params, private):
3012 self._PrepareNicModification(params, private, None, None,
3013 {}, cluster, pnode_uuid)
3016 def _PrepareNicMod(_, nic, params, private):
3017 self._PrepareNicModification(params, private, nic.ip, nic.network,
3018 nic.nicparams, cluster, pnode_uuid)
3021 def _PrepareNicRemove(_, params, __):
3022 ip = params.ip
3023 net = params.network
3024 if net is not None and ip is not None:
3025 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3027 # Verify NIC changes (operating on copy)
3028 nics = self.instance.nics[:]
3029 _ApplyContainerMods("NIC", nics, None, self.nicmod,
3030 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3031 if len(nics) > constants.MAX_NICS:
3032 raise errors.OpPrereqError("Instance has too many network interfaces"
3033 " (%d), cannot add more" % constants.MAX_NICS,
3036 # Pre-compute NIC changes (necessary to use result in hooks)
3037 self._nic_chgdesc = []
3039 # Operate on copies as this is still in prereq
3040 nics = [nic.Copy() for nic in self.instance.nics]
3041 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3042 self._CreateNewNic, self._ApplyNicMods, None)
3043 # Verify that NIC names are unique and valid
3044 utils.ValidateDeviceNames("NIC", nics)
3045 self._new_nics = nics
3046 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3048 self._new_nics = None
3049 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3051 if not self.op.ignore_ipolicy:
3052 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3055 # Fill ispec with backend parameters
3056 ispec[constants.ISPEC_SPINDLE_USE] = \
3057 self.be_new.get(constants.BE_SPINDLE_USE, None)
3058 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3061 # Copy ispec to verify parameters with min/max values separately
3062 if self.op.disk_template:
3063 new_disk_template = self.op.disk_template
3065 new_disk_template = self.instance.disk_template
3066 ispec_max = ispec.copy()
3067 ispec_max[constants.ISPEC_MEM_SIZE] = \
3068 self.be_new.get(constants.BE_MAXMEM, None)
3069 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3070 new_disk_template)
3071 ispec_min = ispec.copy()
3072 ispec_min[constants.ISPEC_MEM_SIZE] = \
3073 self.be_new.get(constants.BE_MINMEM, None)
3074 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3075 new_disk_template)
3077 if (res_max or res_min):
3078 # FIXME: Improve error message by including information about whether
3079 # the upper or lower limit of the parameter fails the ipolicy.
3080 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3081 (group_info, group_info.name,
3082 utils.CommaJoin(set(res_max + res_min))))
3083 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3085 def _ConvertPlainToDrbd(self, feedback_fn):
3086 """Converts an instance from plain to drbd.
3089 feedback_fn("Converting template to drbd")
3090 pnode_uuid = self.instance.primary_node
3091 snode_uuid = self.op.remote_node_uuid
3093 assert self.instance.disk_template == constants.DT_PLAIN
3095 # create a fake disk info for _GenerateDiskTemplate
3096 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3097 constants.IDISK_VG: d.logical_id[0],
3098 constants.IDISK_NAME: d.name}
3099 for d in self.instance.disks]
3100 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3101 self.instance.uuid, pnode_uuid,
3102 [snode_uuid], disk_info, None, None, 0,
3103 feedback_fn, self.diskparams)
3104 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3105 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3106 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3107 info = GetInstanceInfoText(self.instance)
3108 feedback_fn("Creating additional volumes...")
3109 # first, create the missing data and meta devices
3110 for disk in anno_disks:
3111 # unfortunately this is... not too nice
3112 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3113 info, True, p_excl_stor)
3114 for child in disk.children:
3115 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3116 s_excl_stor)
3117 # at this stage, all new LVs have been created, we can rename the
3118 # old ones
3119 feedback_fn("Renaming original volumes...")
3120 rename_list = [(o, n.children[0].logical_id)
3121 for (o, n) in zip(self.instance.disks, new_disks)]
3122 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3123 result.Raise("Failed to rename original LVs")
3125 feedback_fn("Initializing DRBD devices...")
3126 # all child devices are in place, we can now create the DRBD devices
3127 try:
3128 for disk in anno_disks:
3129 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3130 (snode_uuid, s_excl_stor)]:
3131 f_create = node_uuid == pnode_uuid
3132 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3133 f_create, excl_stor)
3134 except errors.GenericError, e:
3135 feedback_fn("Initializing of DRBD devices failed;"
3136 " renaming back original volumes...")
3137 rename_back_list = [(n.children[0], o.logical_id)
3138 for (n, o) in zip(new_disks, self.instance.disks)]
3139 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3140 result.Raise("Failed to rename LVs back after error %s" % str(e))
3141 raise
3143 # at this point, the instance has been modified
3144 self.instance.disk_template = constants.DT_DRBD8
3145 self.instance.disks = new_disks
3146 self.cfg.Update(self.instance, feedback_fn)
3148 # Release node locks while waiting for sync
3149 ReleaseLocks(self, locking.LEVEL_NODE)
3151 # disks are created, waiting for sync
3152 disk_abort = not WaitForSync(self, self.instance,
3153 oneshot=not self.op.wait_for_sync)
3154 if disk_abort:
3155 raise errors.OpExecError("There are some degraded disks for"
3156 " this instance, please clean up manually")
3158 # Node resource locks will be released by caller
3160 def _ConvertDrbdToPlain(self, feedback_fn):
3161 """Converts an instance from drbd to plain.
3164 assert len(self.instance.secondary_nodes) == 1
3165 assert self.instance.disk_template == constants.DT_DRBD8
3167 pnode_uuid = self.instance.primary_node
3168 snode_uuid = self.instance.secondary_nodes[0]
3169 feedback_fn("Converting template to plain")
3171 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3172 new_disks = [d.children[0] for d in self.instance.disks]
3174 # copy over size, mode and name
3175 for parent, child in zip(old_disks, new_disks):
3176 child.size = parent.size
3177 child.mode = parent.mode
3178 child.name = parent.name
3180 # this is a DRBD disk, return its port to the pool
3181 # NOTE: this must be done right before the call to cfg.Update!
3182 for disk in old_disks:
3183 tcp_port = disk.logical_id[2]
3184 self.cfg.AddTcpUdpPort(tcp_port)
3186 # update instance structure
3187 self.instance.disks = new_disks
3188 self.instance.disk_template = constants.DT_PLAIN
3189 _UpdateIvNames(0, self.instance.disks)
3190 self.cfg.Update(self.instance, feedback_fn)
3192 # Release locks in case removing disks takes a while
3193 ReleaseLocks(self, locking.LEVEL_NODE)
3195 feedback_fn("Removing volumes on the secondary node...")
3196 for disk in old_disks:
3197 result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3198 result.Warn("Could not remove block device %s on node %s,"
3199 " continuing anyway" %
3200 (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3203 feedback_fn("Removing unneeded volumes on the primary node...")
3204 for idx, disk in enumerate(old_disks):
3205 meta = disk.children[1]
3206 result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3207 result.Warn("Could not remove metadata for disk %d on node %s,"
3208 " continuing anyway" %
3209 (idx, self.cfg.GetNodeName(pnode_uuid)),
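# Editorial note: both converters above are registered in _DISK_CONVERSIONS at
# the end of this class and dispatched from Exec() roughly as
#   mode = (self.instance.disk_template, self.op.disk_template)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)
# so supporting a new conversion means adding a method plus a dictionary entry.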
3212 def _CreateNewDisk(self, idx, params, _):
3213 """Creates a new disk.
3217 if self.instance.disk_template in constants.DTS_FILEBASED:
3218 (file_driver, file_path) = self.instance.disks[0].logical_id
3219 file_path = os.path.dirname(file_path)
3221 file_driver = file_path = None
3223 disk = \
3224 GenerateDiskTemplate(self, self.instance.disk_template,
3225 self.instance.uuid, self.instance.primary_node,
3226 self.instance.secondary_nodes, [params], file_path,
3227 file_driver, idx, self.Log, self.diskparams)[0]
3229 new_disks = CreateDisks(self, self.instance, disks=[disk])
3231 if self.cluster.prealloc_wipe_disks:
3233 WipeOrCleanupDisks(self, self.instance,
3234 disks=[(idx, disk, 0)],
3238 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3241 def _PostAddDisk(self, _, disk):
3242 if not WaitForSync(self, self.instance, disks=[disk],
3243 oneshot=not self.op.wait_for_sync):
3244 raise errors.OpExecError("Failed to sync disks of %s" %
3248 def _ModifyDisk(idx, disk, params, _):
3253 mode = params.get(constants.IDISK_MODE, None)
3254 if mode:
3255 disk.mode = mode
3256 changes.append(("disk.mode/%d" % idx, disk.mode))
3258 name = params.get(constants.IDISK_NAME, None)
3259 disk.name = name
3260 changes.append(("disk.name/%d" % idx, disk.name))
3264 def _RemoveDisk(self, idx, root, _):
3268 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3269 for node_uuid, disk in anno_disk.ComputeNodeTree(
3270 self.instance.primary_node):
3271 msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3272 .fail_msg
3273 if msg:
3274 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3275 " continuing anyway", idx,
3276 self.cfg.GetNodeName(node_uuid), msg)
3278 # if this is a DRBD disk, return its port to the pool
3279 if root.dev_type in constants.LDS_DRBD:
3280 self.cfg.AddTcpUdpPort(root.logical_id[2])
3282 def _CreateNewNic(self, idx, params, private):
3283 """Creates data structure for a new network interface.
3286 mac = params[constants.INIC_MAC]
3287 ip = params.get(constants.INIC_IP, None)
3288 net = params.get(constants.INIC_NETWORK, None)
3289 name = params.get(constants.INIC_NAME, None)
3290 net_uuid = self.cfg.LookupNetwork(net)
3291 #TODO: not private.filled?? can a nic have no nicparams??
3292 nicparams = private.filled
3293 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3294 nicparams=nicparams)
3295 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3299 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3300 (mac, ip, private.filled[constants.NIC_MODE],
3301 private.filled[constants.NIC_LINK],
3305 def _ApplyNicMods(self, idx, nic, params, private):
3306 """Modifies a network interface.
3311 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3312 if key in params:
3313 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3314 setattr(nic, key, params[key])
3316 new_net = params.get(constants.INIC_NETWORK, nic.network)
3317 new_net_uuid = self.cfg.LookupNetwork(new_net)
3318 if new_net_uuid != nic.network:
3319 changes.append(("nic.network/%d" % idx, new_net))
3320 nic.network = new_net_uuid
3323 nic.nicparams = private.filled
3325 for (key, val) in nic.nicparams.items():
3326 changes.append(("nic.%s/%d" % (key, idx), val))
3330 def Exec(self, feedback_fn):
3331 """Modifies an instance.
3333 All parameters take effect only at the next restart of the instance.
3336 # Process here the warnings from CheckPrereq, as we don't have a
3337 # feedback_fn there.
3338 # TODO: Replace with self.LogWarning
3339 for warn in self.warn:
3340 feedback_fn("WARNING: %s" % warn)
3342 assert ((self.op.disk_template is None) ^
3343 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3344 "Not owning any node resource locks"
3349 if self.op.pnode_uuid:
3350 self.instance.primary_node = self.op.pnode_uuid
3353 if self.op.runtime_mem:
3354 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3355 self.instance,
3356 self.op.runtime_mem)
3357 rpcres.Raise("Cannot modify instance runtime memory")
3358 result.append(("runtime_memory", self.op.runtime_mem))
3360 # Apply disk changes
3361 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3362 self._CreateNewDisk, self._ModifyDisk,
3363 self._RemoveDisk, post_add_fn=self._PostAddDisk)
3364 _UpdateIvNames(0, self.instance.disks)
3366 if self.op.disk_template:
3368 check_nodes = set(self.instance.all_nodes)
3369 if self.op.remote_node_uuid:
3370 check_nodes.add(self.op.remote_node_uuid)
3371 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3372 owned = self.owned_locks(level)
3373 assert not (check_nodes - owned), \
3374 ("Not owning the correct locks, owning %r, expected at least %r" %
3375 (owned, check_nodes))
3377 r_shut = ShutdownInstanceDisks(self, self.instance)
3378 if not r_shut:
3379 raise errors.OpExecError("Cannot shut down instance disks; unable to"
3380 " proceed with disk template conversion")
3381 mode = (self.instance.disk_template, self.op.disk_template)
3382 try:
3383 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3384 finally:
3385 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3387 result.append(("disk_template", self.op.disk_template))
3389 assert self.instance.disk_template == self.op.disk_template, \
3390 ("Expected disk template '%s', found '%s'" %
3391 (self.op.disk_template, self.instance.disk_template))
3393 # Release node and resource locks if there are any (they might already have
3394 # been released during disk conversion)
3395 ReleaseLocks(self, locking.LEVEL_NODE)
3396 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3399 if self._new_nics is not None:
3400 self.instance.nics = self._new_nics
3401 result.extend(self._nic_chgdesc)
3404 if self.op.hvparams:
3405 self.instance.hvparams = self.hv_inst
3406 for key, val in self.op.hvparams.iteritems():
3407 result.append(("hv/%s" % key, val))
3410 if self.op.beparams:
3411 self.instance.beparams = self.be_inst
3412 for key, val in self.op.beparams.iteritems():
3413 result.append(("be/%s" % key, val))
3417 self.instance.os = self.op.os_name
3420 if self.op.osparams:
3421 self.instance.osparams = self.os_inst
3422 for key, val in self.op.osparams.iteritems():
3423 result.append(("os/%s" % key, val))
3425 if self.op.offline is None:
3426 # Ignore
3427 pass
3428 elif self.op.offline:
3429 # Mark instance as offline
3430 self.cfg.MarkInstanceOffline(self.instance.uuid)
3431 result.append(("admin_state", constants.ADMINST_OFFLINE))
3433 # Mark instance as online, but stopped
3434 self.cfg.MarkInstanceDown(self.instance.uuid)
3435 result.append(("admin_state", constants.ADMINST_DOWN))
3437 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3439 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3440 self.owned_locks(locking.LEVEL_NODE)), \
3441 "All node locks should have been released by now"
3445 _DISK_CONVERSIONS = {
3446 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3447 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
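
  # This LU only computes the required moves; the actual instance migrations
  # and replacements are returned as follow-up jobs (see ResultWithJobs at
  # the end of Exec).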

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
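
    # Note: lock levels are processed in declaration order (NODEGROUP before
    # NODE), so by the time this runs for LEVEL_NODE the group locks requested
    # above are already held and owned_locks(LEVEL_NODEGROUP) may be queried.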

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)
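
    # On success, ial.result is in the node-evacuation format consumed by
    # LoadNodeEvacResult: a (moved, failed, jobs) triple, where "jobs" is a
    # list of job definitions (each a list of serialized opcodes).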
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
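
  # For reference, this LU backs "gnt-instance change-group", e.g.:
  #   gnt-instance change-group --to=group2 instance1.example.com
  # (group and instance names here are illustrative); the jobs computed above
  # are then submitted for execution.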