# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""
import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance
#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
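# In other words: an optional list of (description, new-value) pairs, as
# produced by the container-modification callbacks.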
def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)
def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
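  # UpgradeBeParams rewrites the legacy "memory" backend parameter into the
  # newer minmem/maxmem pair, mirroring the import-compatibility handling in
  # _ReadExportParams below.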
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)
      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)
      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)
def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get
      # from the iallocator, which wants the disk template as input. To solve
      # this chicken-and-egg problem, it should be possible to specify just a
      # node group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disk parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
  def _CheckVLANArguments(self):
    """Check validity of VLANs, if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
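      # Accepted spellings, normalized below: "5" (bare VLAN id, rewritten
      # to ".5"), ".5" (untagged access port), and trunks such as ":10:20"
      # or ".5:10:20" (an untagged VLAN followed by tagged ones).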
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)
  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)
    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name
    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
        [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
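    # NODE_RES mirrors the node locks so that Exec() can later release the
    # resource locks independently of the plain node locks.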
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
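    # Only mirrored disk templates (e.g. DRBD) cause the request to ask for
    # a second node; all other templates are placed on the primary alone.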
    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return (nl, nl)
  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info
  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]
  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)
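      # final layout: <cluster storage dir>[/<op.file_storage_dir>]/<name>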
      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"
    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))
    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
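      # as with LV adoption above, the configured size is replaced by the
      # size the node actually reports for the device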
    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
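    # if the node group does not override the access mode for this disk
    # template, kernelspace access is assumed as the default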
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)
    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))
    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    for tag in self.op.tags:
      iobj.AddTag(tag)
    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)
    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True
    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))
      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")

        transfers = []

        for idx, image in enumerate(self.src_images):
          if not image:
            continue

          # FIXME: pass debug option from opcode to backend
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                             constants.IEIO_FILE, (image, ),
                                             constants.IEIO_SCRIPT,
                                             ((iobj.disks[idx], iobj), idx),
                                             None)
          transfers.append(dt)

        import_result = \
          masterd.instance.TransferInstanceData(self, feedback_fn,
                                                self.op.src_node_uuid,
                                                self.pnode.uuid,
                                                self.pnode.secondary_ip,
                                                iobj, transfers)
        if not compat.all(import_result):
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (self.op.instance_name,
                                                      self.pnode.name))

        rename_from = self._old_instance_name
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
        feedback_fn("* preparing remote import...")
        # The source cluster will stop the instance before attempting to make
        # a connection. In some cases stopping an instance can take a long
        # time, hence the shutdown timeout is added to the connection
        # timeout.
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                           self.op.source_shutdown_timeout)
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

        assert iobj.primary_node == self.pnode.uuid
        disk_results = \
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                        self.source_x509_ca,
                                        self._cds, timeouts)
        if not compat.all(disk_results):
          # TODO: Should the instance still be started, even if some disks
          # failed to import (valid for local imports, too)?
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (self.op.instance_name,
                                                      self.pnode.name))

        rename_from = self.source_instance_name

      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

      if self.op.mode != constants.INSTANCE_CREATE:
        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a
    # disabled disk template, but in case it does, the renaming of file-based
    # instances will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)
  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
        self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
        renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)
1728 def CheckPrereq(self):
1729 """Check prerequisites.
1731 This checks that the instance is in the cluster.
1734 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1735 assert self.instance is not None, \
1736 "Cannot retrieve locked instance %s" % self.op.instance_name
1738 if self.instance.disk_template not in constants.DTS_COPYABLE:
1739 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1740 self.instance.disk_template,
1741 errors.ECODE_STATE)
1743 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1744 assert target_node is not None, \
1745 "Cannot retrieve locked node %s" % self.op.target_node
1747 self.target_node_uuid = target_node.uuid
1748 if target_node.uuid == self.instance.primary_node:
1749 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1750 (self.instance.name, target_node.name),
1751 errors.ECODE_STATE)
1753 bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1755 for idx, dsk in enumerate(self.instance.disks):
1756 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1757 constants.DT_SHARED_FILE):
1758 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1759 " cannot copy" % idx, errors.ECODE_STATE)
1761 CheckNodeOnline(self, target_node.uuid)
1762 CheckNodeNotDrained(self, target_node.uuid)
1763 CheckNodeVmCapable(self, target_node.uuid)
1764 cluster = self.cfg.GetClusterInfo()
1765 group_info = self.cfg.GetNodeGroup(target_node.group)
1766 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1767 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1768 ignore=self.op.ignore_ipolicy)
1770 if self.instance.admin_state == constants.ADMINST_UP:
1771 # check memory requirements on the secondary node
1772 CheckNodeFreeMemory(
1773 self, target_node.uuid, "failing over instance %s" %
1774 self.instance.name, bep[constants.BE_MAXMEM],
1775 self.instance.hypervisor,
1776 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1777 else:
1778 self.LogInfo("Not checking memory on the secondary node as"
1779 " instance will not be started")
1781 # check bridge existence
1782 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1784 def Exec(self, feedback_fn):
1785 """Move an instance.
1787 The move is done by shutting it down on its present node, copying
1788 the data over (slow) and starting it on the new node.
1790 """
1791 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1792 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1794 self.LogInfo("Shutting down instance %s on source node %s",
1795 self.instance.name, source_node.name)
1797 assert (self.owned_locks(locking.LEVEL_NODE) ==
1798 self.owned_locks(locking.LEVEL_NODE_RES))
1800 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1801 self.op.shutdown_timeout,
1802 self.op.reason)
1803 if self.op.ignore_consistency:
1804 result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1805 " anyway. Please make sure node %s is down. Error details" %
1806 (self.instance.name, source_node.name, source_node.name),
1807 self.LogWarning)
1808 else:
1809 result.Raise("Could not shutdown instance %s on node %s" %
1810 (self.instance.name, source_node.name))
1812 # create the target disks
1813 try:
1814 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1815 except errors.OpExecError:
1816 self.LogWarning("Device creation failed")
1817 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1818 raise
1820 cluster_name = self.cfg.GetClusterInfo().cluster_name
1822 errs = []
1823 # activate, get path, copy the data over
1824 for idx, disk in enumerate(self.instance.disks):
1825 self.LogInfo("Copying data for disk %d", idx)
1826 result = self.rpc.call_blockdev_assemble(
1827 target_node.uuid, (disk, self.instance), self.instance.name,
1828 True, idx)
1829 if result.fail_msg:
1830 self.LogWarning("Can't assemble newly created disk %d: %s",
1831 idx, result.fail_msg)
1832 errs.append(result.fail_msg)
1833 break
1834 dev_path, _ = result.payload
1835 result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1836 self.instance),
1837 target_node.secondary_ip,
1838 dev_path, cluster_name)
1839 if result.fail_msg:
1840 self.LogWarning("Can't copy data over for disk %d: %s",
1841 idx, result.fail_msg)
1842 errs.append(result.fail_msg)
1843 break
1845 if errs:
1846 self.LogWarning("Some disks failed to copy, aborting")
1847 try:
1848 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1849 finally:
1850 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1851 raise errors.OpExecError("Errors during disk copy: %s" %
1852 (",".join(errs),))
1854 self.instance.primary_node = target_node.uuid
1855 self.cfg.Update(self.instance, feedback_fn)
1857 self.LogInfo("Removing the disks on the original node")
1858 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1860 # Only start the instance if it's marked as up
1861 if self.instance.admin_state == constants.ADMINST_UP:
1862 self.LogInfo("Starting instance %s on node %s",
1863 self.instance.name, target_node.name)
1865 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1866 ignore_secondaries=True)
1867 if not disks_ok:
1868 ShutdownInstanceDisks(self, self.instance)
1869 raise errors.OpExecError("Can't activate the instance's disks")
1871 result = self.rpc.call_instance_start(target_node.uuid,
1872 (self.instance, None, None), False,
1873 self.op.reason)
1874 msg = result.fail_msg
1875 if msg:
1876 ShutdownInstanceDisks(self, self.instance)
1877 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1878 (self.instance.name, target_node.name, msg))
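# Editorial sketch (not part of the original module): a hypothetical
# opcode triggering the move above; CheckPrereq only accepts instances
# whose disks use a copyable template (plain, file, sharedfile):
#
#   op = opcodes.OpInstanceMove(instance_name="inst1.example.com",
#                               target_node="node2.example.com")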
1881 class LUInstanceMultiAlloc(NoHooksLU):
1882 """Allocates multiple instances at the same time.
1887 def CheckArguments(self):
1892 for inst in self.op.instances:
1893 if inst.iallocator is not None:
1894 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1895 " instance objects", errors.ECODE_INVAL)
1896 nodes.append(bool(inst.pnode))
1897 if inst.disk_template in constants.DTS_INT_MIRROR:
1898 nodes.append(bool(inst.snode))
1900 has_nodes = compat.any(nodes)
1901 if compat.all(nodes) ^ has_nodes:
1902 raise errors.OpPrereqError("There are instance objects providing"
1903 " pnode/snode while others do not",
1906 if not has_nodes and self.op.iallocator is None:
1907 default_iallocator = self.cfg.GetDefaultIAllocator()
1908 if default_iallocator:
1909 self.op.iallocator = default_iallocator
1911 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1912 " given and no cluster-wide default"
1913 " iallocator found; please specify either"
1914 " an iallocator or nodes on the instances"
1915 " or set a cluster-wide default iallocator",
1918 _CheckOpportunisticLocking(self.op)
1920 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1922 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1923 utils.CommaJoin(dups), errors.ECODE_INVAL)
1925 def ExpandNames(self):
1926 """Calculate the locks.
1929 self.share_locks = ShareAll()
1930 self.needed_locks = {
1931 # iallocator will select nodes and even if no iallocator is used,
1932 # collisions with LUInstanceCreate should be avoided
1933 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1934 }
1936 if self.op.iallocator:
1937 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1938 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1940 if self.op.opportunistic_locking:
1941 self.opportunistic_locks[locking.LEVEL_NODE] = True
1942 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1943 else:
1944 nodeslist = []
1945 for inst in self.op.instances:
1946 (inst.pnode_uuid, inst.pnode) = \
1947 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1948 nodeslist.append(inst.pnode_uuid)
1949 if inst.snode is not None:
1950 (inst.snode_uuid, inst.snode) = \
1951 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1952 nodeslist.append(inst.snode_uuid)
1954 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1955 # Lock resources of instance's primary and secondary nodes (copy to
1956 # prevent accidental modification)
1957 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1959 def CheckPrereq(self):
1960 """Check prerequisite.
1963 if self.op.iallocator:
1964 cluster = self.cfg.GetClusterInfo()
1965 default_vg = self.cfg.GetVGName()
1966 ec_id = self.proc.GetECId()
1968 if self.op.opportunistic_locking:
1969 # Only consider nodes for which a lock is held
1970 node_whitelist = self.cfg.GetNodeNames(
1971 list(self.owned_locks(locking.LEVEL_NODE)))
1972 else:
1973 node_whitelist = None
1975 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1976 _ComputeNics(op, cluster, None,
1977 self.cfg, ec_id),
1978 _ComputeFullBeParams(op, cluster),
1979 node_whitelist)
1980 for op in self.op.instances]
1982 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1983 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1985 ial.Run(self.op.iallocator)
1987 if not ial.success:
1988 raise errors.OpPrereqError("Can't compute nodes using"
1989 " iallocator '%s': %s" %
1990 (self.op.iallocator, ial.info),
1991 errors.ECODE_NORES)
1993 self.ia_result = ial.result
1995 if self.op.dry_run:
1996 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1997 constants.JOB_IDS_KEY: [],
1998 })
2000 def _ConstructPartialResult(self):
2001 """Constructs the partial result.
2003 """
2004 if self.op.iallocator:
2005 (allocatable, failed_insts) = self.ia_result
2006 allocatable_insts = map(compat.fst, allocatable)
2007 else:
2008 allocatable_insts = [op.instance_name for op in self.op.instances]
2009 failed_insts = []
2011 return {
2012 constants.ALLOCATABLE_KEY: allocatable_insts,
2013 constants.FAILED_KEY: failed_insts,
2014 }
2016 def Exec(self, feedback_fn):
2017 """Executes the opcode.
2019 """
2020 jobs = []
2021 if self.op.iallocator:
2022 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2023 (allocatable, failed) = self.ia_result
2025 for (name, node_names) in allocatable:
2026 op = op2inst.pop(name)
2028 (op.pnode_uuid, op.pnode) = \
2029 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2030 if len(node_names) > 1:
2031 (op.snode_uuid, op.snode) = \
2032 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2034 jobs.append([op])
2036 missing = set(op2inst.keys()) - set(failed)
2037 assert not missing, \
2038 "Iallocator did return incomplete result: %s" % \
2039 utils.CommaJoin(missing)
2040 else:
2041 jobs.extend([op] for op in self.op.instances)
2043 return ResultWithJobs(jobs, **self._ConstructPartialResult())
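# Editorial sketch (not part of the original module): the partial result
# merged into ResultWithJobs above maps allocatable and failed instance
# names, so a dry run with one failing instance might look like:
#
#   {constants.ALLOCATABLE_KEY: ["inst1.example.com"],
#    constants.FAILED_KEY: ["inst2.example.com"],
#    constants.JOB_IDS_KEY: []}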
2046 class _InstNicModPrivate:
2047 """Data structure for network interface modifications.
2049 Used by L{LUInstanceSetParams}.
2051 """
2052 def __init__(self):
2053 self.params = None
2054 self.filled = None
2057 def _PrepareContainerMods(mods, private_fn):
2058 """Prepares a list of container modifications by adding a private data field.
2060 @type mods: list of tuples; (operation, index, parameters)
2061 @param mods: List of modifications
2062 @type private_fn: callable or None
2063 @param private_fn: Callable for constructing a private data field for a
2064 modification
2065 @rtype: list
2067 """
2068 if private_fn is None:
2069 fn = lambda: None
2070 else:
2071 fn = private_fn
2073 return [(op, idx, params, fn()) for (op, idx, params) in mods]
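# Editorial sketch (not part of the original module): the shape of the
# data handled above. Each (op, index, params) modification gains one
# private object, e.g.:
#
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
#   _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate instance>)]
#
# With private_fn=None the fourth tuple element is simply None.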
2076 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2077 """Checks if nodes have enough physical CPUs
2079 This function checks if all given nodes have the needed number of
2080 physical CPUs. In case any node has less CPUs or we cannot get the
2081 information from the node, this function raises an OpPrereqError
2082 exception.
2084 @type lu: C{LogicalUnit}
2085 @param lu: a logical unit from which we get configuration data
2086 @type node_uuids: C{list}
2087 @param node_uuids: the list of node UUIDs to check
2088 @type requested: C{int}
2089 @param requested: the minimum acceptable number of physical CPUs
2090 @type hypervisor_specs: list of pairs (string, dict of strings)
2091 @param hypervisor_specs: list of hypervisor specifications in
2092 pairs (hypervisor_name, hvparams)
2093 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2094 or we cannot check the node
2096 """
2097 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2098 for node_uuid in node_uuids:
2099 info = nodeinfo[node_uuid]
2100 node_name = lu.cfg.GetNodeName(node_uuid)
2101 info.Raise("Cannot get current information from node %s" % node_name,
2102 prereq=True, ecode=errors.ECODE_ENVIRON)
2103 (_, _, (hv_info, )) = info.payload
2104 num_cpus = hv_info.get("cpu_total", None)
2105 if not isinstance(num_cpus, int):
2106 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2107 " on node %s, result was '%s'" %
2108 (node_name, num_cpus), errors.ECODE_ENVIRON)
2109 if requested > num_cpus:
2110 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2111 "required" % (node_name, num_cpus, requested),
2115 def GetItemFromContainer(identifier, kind, container):
2116 """Return the item refered by the identifier.
2118 @type identifier: string
2119 @param identifier: Item index or name or UUID
2121 @param kind: One-word item description
2122 @type container: list
2123 @param container: Container to get the item from
2128 idx = int(identifier)
2131 absidx = len(container) - 1
2133 raise IndexError("Not accepting negative indices other than -1")
2134 elif idx > len(container):
2135 raise IndexError("Got %s index %s, but there are only %s" %
2136 (kind, idx, len(container)))
2139 return (absidx, container[idx])
2143 for idx, item in enumerate(container):
2144 if item.uuid == identifier or item.name == identifier:
2147 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2148 (kind, identifier), errors.ECODE_NOENT)
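# Editorial sketch (not part of the original module): the identifier may
# be a numeric index or a name/UUID; all resolve to the same
# (absolute_index, item) pair, e.g. for a NIC container:
#
#   GetItemFromContainer("0", "nic", instance.nics)    # first NIC
#   GetItemFromContainer("-1", "nic", instance.nics)   # last NIC
#   GetItemFromContainer("eth0", "nic", instance.nics) # by name or UUID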
2151 def _ApplyContainerMods(kind, container, chgdesc, mods,
2152 create_fn, modify_fn, remove_fn,
2153 post_add_fn=None):
2154 """Applies descriptions in C{mods} to C{container}.
2156 @type kind: string
2157 @param kind: One-word item description
2158 @type container: list
2159 @param container: Container to modify
2160 @type chgdesc: None or list
2161 @param chgdesc: List of applied changes
2162 @type mods: list
2163 @param mods: Modifications as returned by L{_PrepareContainerMods}
2164 @type create_fn: callable
2165 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2166 receives absolute item index, parameters and private data object as added
2167 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2168 as list
2169 @type modify_fn: callable
2170 @param modify_fn: Callback for modifying an existing item
2171 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2172 and private data object as added by L{_PrepareContainerMods}, returns
2173 changes as list
2174 @type remove_fn: callable
2175 @param remove_fn: Callback on removing item; receives absolute item index,
2176 item and private data object as added by L{_PrepareContainerMods}
2177 @type post_add_fn: callable
2178 @param post_add_fn: Callable for post-processing a newly created item after
2179 it has been put into the container. It receives the index of the new item
2180 and the new item as parameters.
2182 """
2183 for (op, identifier, params, private) in mods:
2184 changes = None
2186 if op == constants.DDM_ADD:
2187 # Calculate where item will be added
2188 # When adding an item, identifier can only be an index
2189 try:
2190 idx = int(identifier)
2191 except ValueError:
2192 raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
2193 " as identifier for %s" % constants.DDM_ADD,
2194 errors.ECODE_INVAL)
2195 if idx == -1:
2196 addidx = len(container)
2197 else:
2198 if idx < 0:
2199 raise IndexError("Not accepting negative indices other than -1")
2200 elif idx > len(container):
2201 raise IndexError("Got %s index %s, but there are only %s" %
2202 (kind, idx, len(container)))
2203 addidx = idx
2205 if create_fn is None:
2206 item = params
2207 else:
2208 (item, changes) = create_fn(addidx, params, private)
2210 if idx == -1:
2211 container.append(item)
2212 else:
2213 assert idx >= 0
2214 assert idx <= len(container)
2215 # list.insert does so before the specified index
2216 container.insert(idx, item)
2218 if post_add_fn is not None:
2219 post_add_fn(addidx, item)
2221 else:
2222 # Retrieve existing item
2223 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2225 if op == constants.DDM_REMOVE:
2226 assert not params
2228 changes = [("%s/%s" % (kind, absidx), "remove")]
2230 if remove_fn is not None:
2231 msg = remove_fn(absidx, item, private)
2232 if msg:
2233 changes.append(("%s/%s" % (kind, absidx), msg))
2235 assert container[absidx] == item
2236 del container[absidx]
2237 elif op == constants.DDM_MODIFY:
2238 if modify_fn is not None:
2239 changes = modify_fn(absidx, item, params, private)
2240 else:
2241 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2243 assert _TApplyContModsCbChanges(changes)
2245 if not (chgdesc is None or changes is None):
2246 chgdesc.extend(changes)
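# Editorial sketch (not part of the original module): with no callbacks,
# removing the second disk yields a single change description:
#
#   chgdesc = []
#   _ApplyContainerMods("disk", disks, chgdesc,
#                       [(constants.DDM_REMOVE, "1", {}, None)],
#                       None, None, None)
#   # chgdesc == [("disk/1", "remove")]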
2249 def _UpdateIvNames(base_index, disks):
2250 """Updates the C{iv_name} attribute of disks.
2252 @type disks: list of L{objects.Disk}
2254 """
2255 for (idx, disk) in enumerate(disks):
2256 disk.iv_name = "disk/%s" % (base_index + idx, )
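# Editorial sketch (not part of the original module): after container
# modifications, visible disk names are renumbered consecutively:
#
#   _UpdateIvNames(0, instance.disks)
#   # instance.disks now carry iv_name "disk/0", "disk/1", ...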
2259 class LUInstanceSetParams(LogicalUnit):
2260 """Modifies an instances's parameters.
2263 HPATH = "instance-modify"
2264 HTYPE = constants.HTYPE_INSTANCE
2268 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2269 assert ht.TList(mods)
2270 assert not mods or len(mods[0]) in (2, 3)
2272 if mods and len(mods[0]) == 2:
2273 result = []
2275 addremove = 0
2276 for op, params in mods:
2277 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2278 result.append((op, -1, params))
2279 addremove += 1
2281 if addremove > 1:
2282 raise errors.OpPrereqError("Only one %s add or remove operation is"
2283 " supported at a time" % kind,
2284 errors.ECODE_INVAL)
2285 else:
2286 result.append((constants.DDM_MODIFY, op, params))
2288 assert verify_fn(result)
2289 else:
2290 result = mods
2292 return result
2294 @staticmethod
2295 def _CheckMods(kind, mods, key_types, item_fn):
2296 """Ensures requested disk/NIC modifications are valid.
2298 """
2299 for (op, _, params) in mods:
2300 assert ht.TDict(params)
2302 # If 'key_types' is an empty dict, we assume we have an
2303 # 'ext' template and thus do not ForceDictType
2304 if key_types:
2305 utils.ForceDictType(params, key_types)
2307 if op == constants.DDM_REMOVE:
2308 if params:
2309 raise errors.OpPrereqError("No settings should be passed when"
2310 " removing a %s" % kind,
2311 errors.ECODE_INVAL)
2312 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2313 item_fn(op, params)
2314 else:
2315 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2317 @staticmethod
2318 def _VerifyDiskModification(op, params, excl_stor):
2319 """Verifies a disk modification.
2322 if op == constants.DDM_ADD:
2323 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2324 if mode not in constants.DISK_ACCESS_SET:
2325 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2328 size = params.get(constants.IDISK_SIZE, None)
2330 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2331 constants.IDISK_SIZE, errors.ECODE_INVAL)
2334 params[constants.IDISK_SIZE] = size
2335 name = params.get(constants.IDISK_NAME, None)
2336 if name is not None and name.lower() == constants.VALUE_NONE:
2337 params[constants.IDISK_NAME] = None
2339 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2341 elif op == constants.DDM_MODIFY:
2342 if constants.IDISK_SIZE in params:
2343 raise errors.OpPrereqError("Disk size change not possible, use"
2344 " grow-disk", errors.ECODE_INVAL)
2346 raise errors.OpPrereqError("Disk modification doesn't support"
2347 " additional arbitrary parameters",
2349 name = params.get(constants.IDISK_NAME, None)
2350 if name is not None and name.lower() == constants.VALUE_NONE:
2351 params[constants.IDISK_NAME] = None
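# Editorial sketch (not part of the original module): parameter sets that
# pass the verification above. An add must carry a size; a modify may only
# touch name and mode:
#
#   add_params = {constants.IDISK_SIZE: 1024,
#                 constants.IDISK_MODE: constants.DISK_RDWR}
#   modify_params = {constants.IDISK_MODE: constants.DISK_RDONLY}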
2353 @staticmethod
2354 def _VerifyNicModification(op, params):
2355 """Verifies a network interface modification.
2358 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2359 ip = params.get(constants.INIC_IP, None)
2360 name = params.get(constants.INIC_NAME, None)
2361 req_net = params.get(constants.INIC_NETWORK, None)
2362 link = params.get(constants.NIC_LINK, None)
2363 mode = params.get(constants.NIC_MODE, None)
2364 if name is not None and name.lower() == constants.VALUE_NONE:
2365 params[constants.INIC_NAME] = None
2366 if req_net is not None:
2367 if req_net.lower() == constants.VALUE_NONE:
2368 params[constants.INIC_NETWORK] = None
2370 elif link is not None or mode is not None:
2371 raise errors.OpPrereqError("If network is given"
2372 " mode or link should not",
2375 if op == constants.DDM_ADD:
2376 macaddr = params.get(constants.INIC_MAC, None)
2378 params[constants.INIC_MAC] = constants.VALUE_AUTO
2381 if ip.lower() == constants.VALUE_NONE:
2382 params[constants.INIC_IP] = None
2384 if ip.lower() == constants.NIC_IP_POOL:
2385 if op == constants.DDM_ADD and req_net is None:
2386 raise errors.OpPrereqError("If ip=pool, parameter network"
2390 if not netutils.IPAddress.IsValid(ip):
2391 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2394 if constants.INIC_MAC in params:
2395 macaddr = params[constants.INIC_MAC]
2396 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2397 macaddr = utils.NormalizeAndValidateMac(macaddr)
2399 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2400 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2401 " modifying an existing NIC",
2404 def CheckArguments(self):
2405 if not (self.op.nics or self.op.disks or self.op.disk_template or
2406 self.op.hvparams or self.op.beparams or self.op.os_name or
2407 self.op.osparams or self.op.offline is not None or
2408 self.op.runtime_mem or self.op.pnode):
2409 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2411 if self.op.hvparams:
2412 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2413 "hypervisor", "instance", "cluster")
2415 self.op.disks = self._UpgradeDiskNicMods(
2416 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2417 self.op.nics = self._UpgradeDiskNicMods(
2418 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2420 if self.op.disks and self.op.disk_template is not None:
2421 raise errors.OpPrereqError("Disk template conversion and other disk"
2422 " changes not supported at the same time",
2423 errors.ECODE_INVAL)
2425 if (self.op.disk_template and
2426 self.op.disk_template in constants.DTS_INT_MIRROR and
2427 self.op.remote_node is None):
2428 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2429 " one requires specifying a secondary node",
2430 errors.ECODE_INVAL)
2432 # Check NIC modifications
2433 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2434 self._VerifyNicModification)
2436 if self.op.pnode:
2437 (self.op.pnode_uuid, self.op.pnode) = \
2438 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2440 def ExpandNames(self):
2441 self._ExpandAndLockInstance()
2442 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2443 # Can't even acquire node locks in shared mode as upcoming changes in
2444 # Ganeti 2.6 will start to modify the node object on disk conversion
2445 self.needed_locks[locking.LEVEL_NODE] = []
2446 self.needed_locks[locking.LEVEL_NODE_RES] = []
2447 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2448 # Lock node group to look up the ipolicy
2449 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2451 def DeclareLocks(self, level):
2452 if level == locking.LEVEL_NODEGROUP:
2453 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2454 # Acquire locks for the instance's nodegroups optimistically. Needs
2455 # to be verified in CheckPrereq
2456 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2457 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2458 elif level == locking.LEVEL_NODE:
2459 self._LockInstancesNodes()
2460 if self.op.disk_template and self.op.remote_node:
2461 (self.op.remote_node_uuid, self.op.remote_node) = \
2462 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2463 self.op.remote_node)
2464 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2465 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2466 # Copy node locks
2467 self.needed_locks[locking.LEVEL_NODE_RES] = \
2468 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2470 def BuildHooksEnv(self):
2471 """Build hooks env.
2473 This runs on the master, primary and secondaries.
2475 """
2476 args = {}
2477 if constants.BE_MINMEM in self.be_new:
2478 args["minmem"] = self.be_new[constants.BE_MINMEM]
2479 if constants.BE_MAXMEM in self.be_new:
2480 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2481 if constants.BE_VCPUS in self.be_new:
2482 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2483 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2484 # information at all.
2486 if self._new_nics is not None:
2487 nics = []
2489 for nic in self._new_nics:
2490 n = copy.deepcopy(nic)
2491 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2492 n.nicparams = nicparams
2493 nics.append(NICToTuple(self, n))
2495 args["nics"] = nics
2497 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2498 if self.op.disk_template:
2499 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2500 if self.op.runtime_mem:
2501 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2505 def BuildHooksNodes(self):
2506 """Build hooks nodes.
2508 """
2509 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2510 return (nl, nl)
2512 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2513 old_params, cluster, pnode_uuid):
2515 update_params_dict = dict([(key, params[key])
2516 for key in constants.NICS_PARAMETERS
2517 if key in params])
2519 req_link = update_params_dict.get(constants.NIC_LINK, None)
2520 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2522 new_net_uuid = None
2523 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2524 if new_net_uuid_or_name:
2525 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2526 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2528 if old_net_uuid:
2529 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2531 if new_net_uuid:
2532 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2533 if not netparams:
2534 raise errors.OpPrereqError("No netparams found for the network"
2535 " %s, probably not connected" %
2536 new_net_obj.name, errors.ECODE_INVAL)
2537 new_params = dict(netparams)
2538 else:
2539 new_params = GetUpdatedParams(old_params, update_params_dict)
2541 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2543 new_filled_params = cluster.SimpleFillNIC(new_params)
2544 objects.NIC.CheckParameterSyntax(new_filled_params)
2546 new_mode = new_filled_params[constants.NIC_MODE]
2547 if new_mode == constants.NIC_MODE_BRIDGED:
2548 bridge = new_filled_params[constants.NIC_LINK]
2549 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2550 if msg:
2551 msg = "Error checking bridges on node '%s': %s" % \
2552 (self.cfg.GetNodeName(pnode_uuid), msg)
2553 if self.op.force:
2554 self.warn.append(msg)
2555 else:
2556 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2558 elif new_mode == constants.NIC_MODE_ROUTED:
2559 ip = params.get(constants.INIC_IP, old_ip)
2560 if ip is None:
2561 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2562 " on a routed NIC", errors.ECODE_INVAL)
2564 elif new_mode == constants.NIC_MODE_OVS:
2565 # TODO: check OVS link
2566 self.LogInfo("OVS links are currently not checked for correctness")
2568 if constants.INIC_MAC in params:
2569 mac = params[constants.INIC_MAC]
2570 if mac is None:
2571 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2572 errors.ECODE_INVAL)
2573 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2574 # otherwise generate the MAC address
2575 params[constants.INIC_MAC] = \
2576 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2577 else:
2578 # or validate/reserve the current one
2579 try:
2580 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2581 except errors.ReservationError:
2582 raise errors.OpPrereqError("MAC address '%s' already in use"
2583 " in cluster" % mac,
2584 errors.ECODE_NOTUNIQUE)
2585 elif new_net_uuid != old_net_uuid:
2587 def get_net_prefix(net_uuid):
2588 mac_prefix = None
2589 if net_uuid:
2590 nobj = self.cfg.GetNetwork(net_uuid)
2591 mac_prefix = nobj.mac_prefix
2593 return mac_prefix
2595 new_prefix = get_net_prefix(new_net_uuid)
2596 old_prefix = get_net_prefix(old_net_uuid)
2597 if old_prefix != new_prefix:
2598 params[constants.INIC_MAC] = \
2599 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2601 # if there is a change in (ip, network) tuple
2602 new_ip = params.get(constants.INIC_IP, old_ip)
2603 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2604 if new_ip:
2605 # if IP is pool then require a network and generate one IP
2606 if new_ip.lower() == constants.NIC_IP_POOL:
2607 if new_net_uuid:
2608 try:
2609 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2610 except errors.ReservationError:
2611 raise errors.OpPrereqError("Unable to get a free IP"
2612 " from the address pool",
2613 errors.ECODE_NORES)
2614 self.LogInfo("Chose IP %s from network %s",
2615 new_ip,
2616 new_net_obj.name)
2617 params[constants.INIC_IP] = new_ip
2618 else:
2619 raise errors.OpPrereqError("ip=pool, but no network found",
2620 errors.ECODE_INVAL)
2621 # Reserve new IP if in the new network if any
2622 elif new_net_uuid:
2623 try:
2624 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2625 check=self.op.conflicts_check)
2626 self.LogInfo("Reserving IP %s in network %s",
2627 new_ip, new_net_obj.name)
2628 except errors.ReservationError:
2629 raise errors.OpPrereqError("IP %s not available in network %s" %
2630 (new_ip, new_net_obj.name),
2631 errors.ECODE_NOTUNIQUE)
2632 # new network is None so check if new IP is a conflicting IP
2633 elif self.op.conflicts_check:
2634 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2636 # release old IP if old network is not None
2637 if old_ip and old_net_uuid:
2638 try:
2639 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2640 except errors.AddressPoolError:
2641 logging.warning("Release IP %s not contained in network %s",
2642 old_ip, old_net_obj.name)
2644 # there are no changes in (ip, network) tuple and old network is not None
2645 elif (old_net_uuid is not None and
2646 (req_link is not None or req_mode is not None)):
2647 raise errors.OpPrereqError("Not allowed to change link or mode of"
2648 " a NIC that is connected to a network",
2651 private.params = new_params
2652 private.filled = new_filled_params
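# Editorial summary (not part of the original module) of the MAC handling
# above: an explicit MAC is reserved cluster-wide, the special values
# "auto"/"generate" request a generated address, and a network change with
# a differing mac_prefix regenerates the address, e.g.:
#
#   params = {constants.INIC_MAC: constants.VALUE_GENERATE}
#   # -> self.cfg.GenerateMAC(new_net_uuid, ec_id) picks a free address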
2654 def _PreCheckDiskTemplate(self, pnode_info):
2655 """CheckPrereq checks related to a new disk template."""
2656 # Arguments are passed to avoid configuration lookups
2657 pnode_uuid = self.instance.primary_node
2658 if self.instance.disk_template == self.op.disk_template:
2659 raise errors.OpPrereqError("Instance already has disk template %s" %
2660 self.instance.disk_template,
2661 errors.ECODE_INVAL)
2663 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2664 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2665 " cluster." % self.op.disk_template)
2667 if (self.instance.disk_template,
2668 self.op.disk_template) not in self._DISK_CONVERSIONS:
2669 raise errors.OpPrereqError("Unsupported disk template conversion from"
2670 " %s to %s" % (self.instance.disk_template,
2671 self.op.disk_template),
2672 errors.ECODE_INVAL)
2673 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2674 msg="cannot change disk template")
2675 if self.op.disk_template in constants.DTS_INT_MIRROR:
2676 if self.op.remote_node_uuid == pnode_uuid:
2677 raise errors.OpPrereqError("Given new secondary node %s is the same"
2678 " as the primary node of the instance" %
2679 self.op.remote_node, errors.ECODE_STATE)
2680 CheckNodeOnline(self, self.op.remote_node_uuid)
2681 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2682 # FIXME: here we assume that the old instance type is DT_PLAIN
2683 assert self.instance.disk_template == constants.DT_PLAIN
2684 disks = [{constants.IDISK_SIZE: d.size,
2685 constants.IDISK_VG: d.logical_id[0]}
2686 for d in self.instance.disks]
2687 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2688 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2690 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2691 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2692 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2693 snode_group)
2694 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2695 ignore=self.op.ignore_ipolicy)
2696 if pnode_info.group != snode_info.group:
2697 self.LogWarning("The primary and secondary nodes are in two"
2698 " different node groups; the disk parameters"
2699 " from the first disk's node group will be"
2702 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2703 # Make sure none of the nodes require exclusive storage
2704 nodes = [pnode_info]
2705 if self.op.disk_template in constants.DTS_INT_MIRROR:
2707 nodes.append(snode_info)
2708 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2709 if compat.any(map(has_es, nodes)):
2710 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2711 " storage is enabled" % (self.instance.disk_template,
2712 self.op.disk_template))
2713 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2715 def _PreCheckDisks(self, ispec):
2716 """CheckPrereq checks related to disk changes.
2719 @param ispec: instance specs to be updated with the new disks
2722 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2724 excl_stor = compat.any(
2725 rpc.GetExclusiveStorageForNodes(self.cfg,
2726 self.instance.all_nodes).values()
2727 )
2729 # Check disk modifications. This is done here and not in CheckArguments
2730 # (as with NICs), because we need to know the instance's disk template
2731 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2732 if self.instance.disk_template == constants.DT_EXT:
2733 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2735 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2738 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2740 # Check the validity of the `provider' parameter
2741 if self.instance.disk_template in constants.DT_EXT:
2742 for mod in self.diskmod:
2743 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2744 if mod[0] == constants.DDM_ADD:
2745 if ext_provider is None:
2746 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2747 " '%s' missing, during disk add" %
2749 constants.IDISK_PROVIDER),
2751 elif mod[0] == constants.DDM_MODIFY:
2753 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2755 constants.IDISK_PROVIDER,
2758 for mod in self.diskmod:
2759 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2760 if ext_provider is not None:
2761 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2762 " instances of type '%s'" %
2763 (constants.IDISK_PROVIDER,
2764 constants.DT_EXT),
2765 errors.ECODE_INVAL)
2767 if not self.op.wait_for_sync and self.instance.disks_active:
2768 for mod in self.diskmod:
2769 if mod[0] == constants.DDM_ADD:
2770 raise errors.OpPrereqError("Can't add a disk to an instance with"
2771 " activated disks and"
2772 " --no-wait-for-sync given.",
2775 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2776 raise errors.OpPrereqError("Disk operations not supported for"
2777 " diskless instances", errors.ECODE_INVAL)
2779 def _PrepareDiskMod(_, disk, params, __):
2780 disk.name = params.get(constants.IDISK_NAME, None)
2782 # Verify disk changes (operating on a copy)
2783 disks = copy.deepcopy(self.instance.disks)
2784 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2785 _PrepareDiskMod, None)
2786 utils.ValidateDeviceNames("disk", disks)
2787 if len(disks) > constants.MAX_DISKS:
2788 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2789 " more" % constants.MAX_DISKS,
2791 disk_sizes = [disk.size for disk in self.instance.disks]
2792 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2793 self.diskmod if op == constants.DDM_ADD)
2794 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2795 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2797 if self.op.offline is not None and self.op.offline:
2798 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2799 msg="can't change to offline")
2801 def CheckPrereq(self):
2802 """Check prerequisites.
2804 This only checks the instance list against the existing names.
2806 """
2807 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2808 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2809 self.cluster = self.cfg.GetClusterInfo()
2810 cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
2812 assert self.instance is not None, \
2813 "Cannot retrieve locked instance %s" % self.op.instance_name
2815 pnode_uuid = self.instance.primary_node
2817 self.warn = []
2819 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2820 not self.op.force):
2821 # verify that the instance is not up
2822 instance_info = self.rpc.call_instance_info(
2823 pnode_uuid, self.instance.name, self.instance.hypervisor,
2824 cluster_hvparams)
2825 if instance_info.fail_msg:
2826 self.warn.append("Can't get instance runtime information: %s" %
2827 instance_info.fail_msg)
2828 elif instance_info.payload:
2829 raise errors.OpPrereqError("Instance is still running on %s" %
2830 self.cfg.GetNodeName(pnode_uuid),
2831 errors.ECODE_STATE)
2833 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2834 node_uuids = list(self.instance.all_nodes)
2835 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2837 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2838 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2839 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2841 # dictionary with instance information after the modification
2842 ispec = {}
2844 if self.op.hotplug or self.op.hotplug_if_possible:
2845 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2846 self.instance)
2847 if result.fail_msg:
2848 if self.op.hotplug:
2849 result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2850 prereq=True, ecode=errors.ECODE_STATE)
2851 else:
2852 self.LogWarning(result.fail_msg)
2853 self.op.hotplug = False
2854 self.LogInfo("Modification will take place without hotplugging.")
2855 else:
2856 self.op.hotplug = True
2858 # Prepare NIC modifications
2859 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2861 # OS change
2862 if self.op.os_name and not self.op.force:
2863 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2864 self.op.force_variant)
2865 instance_os = self.op.os_name
2866 else:
2867 instance_os = self.instance.os
2869 assert not (self.op.disk_template and self.op.disks), \
2870 "Can't modify disk template and apply disk changes at the same time"
2872 if self.op.disk_template:
2873 self._PreCheckDiskTemplate(pnode_info)
2875 self._PreCheckDisks(ispec)
2877 # hvparams processing
2878 if self.op.hvparams:
2879 hv_type = self.instance.hypervisor
2880 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2881 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2882 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2884 # local check
2885 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2886 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2887 self.hv_proposed = self.hv_new = hv_new # the new actual values
2888 self.hv_inst = i_hvdict # the new dict (without defaults)
2889 else:
2890 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2891 self.instance.os,
2892 self.instance.hvparams)
2893 self.hv_new = self.hv_inst = {}
2895 # beparams processing
2896 if self.op.beparams:
2897 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2898 use_none=True)
2899 objects.UpgradeBeParams(i_bedict)
2900 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2901 be_new = self.cluster.SimpleFillBE(i_bedict)
2902 self.be_proposed = self.be_new = be_new # the new actual values
2903 self.be_inst = i_bedict # the new dict (without defaults)
2904 else:
2905 self.be_new = self.be_inst = {}
2906 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2907 be_old = self.cluster.FillBE(self.instance)
2909 # CPU param validation -- checking every time a parameter is
2910 # changed to cover all cases where either CPU mask or vcpus have
2911 # changed
2912 if (constants.BE_VCPUS in self.be_proposed and
2913 constants.HV_CPU_MASK in self.hv_proposed):
2914 cpu_list = \
2915 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2916 # Verify mask is consistent with number of vCPUs. Can skip this
2917 # test if only 1 entry in the CPU mask, which means same mask
2918 # is applied to all vCPUs.
2919 if (len(cpu_list) > 1 and
2920 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2921 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2923 (self.be_proposed[constants.BE_VCPUS],
2924 self.hv_proposed[constants.HV_CPU_MASK]),
2925 errors.ECODE_INVAL)
2927 # Only perform this test if a new CPU mask is given
2928 if constants.HV_CPU_MASK in self.hv_new:
2929 # Calculate the largest CPU number requested
2930 max_requested_cpu = max(map(max, cpu_list))
2931 # Check that all of the instance's nodes have enough physical CPUs to
2932 # satisfy the requested CPU mask
2933 hvspecs = [(self.instance.hypervisor,
2934 self.cfg.GetClusterInfo()
2935 .hvparams[self.instance.hypervisor])]
2936 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2937 max_requested_cpu + 1,
2938 hvspecs)
2940 # osparams processing
2941 if self.op.osparams:
2942 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2943 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2944 self.os_inst = i_osdict # the new dict (without defaults)
2945 else:
2946 self.os_inst = {}
2948 #TODO(dynmem): do the appropriate check involving MINMEM
2949 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2950 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2951 mem_check_list = [pnode_uuid]
2952 if be_new[constants.BE_AUTO_BALANCE]:
2953 # either we changed auto_balance to yes or it was from before
2954 mem_check_list.extend(self.instance.secondary_nodes)
2955 instance_info = self.rpc.call_instance_info(
2956 pnode_uuid, self.instance.name, self.instance.hypervisor,
2957 cluster_hvparams)
2958 hvspecs = [(self.instance.hypervisor,
2959 cluster_hvparams)]
2960 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2961 hvspecs)
2962 pninfo = nodeinfo[pnode_uuid]
2963 msg = pninfo.fail_msg
2964 if msg:
2965 # Assume the primary node is unreachable and go ahead
2966 self.warn.append("Can't get info from primary node %s: %s" %
2967 (self.cfg.GetNodeName(pnode_uuid), msg))
2968 else:
2969 (_, _, (pnhvinfo, )) = pninfo.payload
2970 if not isinstance(pnhvinfo.get("memory_free", None), int):
2971 self.warn.append("Node data from primary node %s doesn't contain"
2972 " free memory information" %
2973 self.cfg.GetNodeName(pnode_uuid))
2974 elif instance_info.fail_msg:
2975 self.warn.append("Can't get instance runtime information: %s" %
2976 instance_info.fail_msg)
2977 else:
2978 if instance_info.payload:
2979 current_mem = int(instance_info.payload["memory"])
2980 else:
2981 # Assume instance not running
2982 # (there is a slight race condition here, but it's not very
2983 # probable, and we have no other way to check)
2984 # TODO: Describe race condition
2985 current_mem = 0
2986 #TODO(dynmem): do the appropriate check involving MINMEM
2987 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2988 pnhvinfo["memory_free"])
2990 raise errors.OpPrereqError("This change will prevent the instance"
2991 " from starting, due to %d MB of memory"
2992 " missing on its primary node" %
2993 miss_mem, errors.ECODE_NORES)
2995 if be_new[constants.BE_AUTO_BALANCE]:
2996 for node_uuid, nres in nodeinfo.items():
2997 if node_uuid not in self.instance.secondary_nodes:
2998 continue
2999 nres.Raise("Can't get info from secondary node %s" %
3000 self.cfg.GetNodeName(node_uuid), prereq=True,
3001 ecode=errors.ECODE_STATE)
3002 (_, _, (nhvinfo, )) = nres.payload
3003 if not isinstance(nhvinfo.get("memory_free", None), int):
3004 raise errors.OpPrereqError("Secondary node %s didn't return free"
3005 " memory information" %
3006 self.cfg.GetNodeName(node_uuid),
3007 errors.ECODE_STATE)
3008 #TODO(dynmem): do the appropriate check involving MINMEM
3009 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3010 raise errors.OpPrereqError("This change will prevent the instance"
3011 " from failover to its secondary node"
3012 " %s, due to not enough memory" %
3013 self.cfg.GetNodeName(node_uuid),
3014 errors.ECODE_NORES)
3016 if self.op.runtime_mem:
3017 remote_info = self.rpc.call_instance_info(
3018 self.instance.primary_node, self.instance.name,
3019 self.instance.hypervisor,
3020 cluster_hvparams)
3021 remote_info.Raise("Error checking node %s" %
3022 self.cfg.GetNodeName(self.instance.primary_node))
3023 if not remote_info.payload: # not running already
3024 raise errors.OpPrereqError("Instance %s is not running" %
3025 self.instance.name, errors.ECODE_STATE)
3027 current_memory = remote_info.payload["memory"]
3028 if (not self.op.force and
3029 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3030 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3031 raise errors.OpPrereqError("Instance %s must have memory between %d"
3032 " and %d MB of memory unless --force is"
3034 (self.instance.name,
3035 self.be_proposed[constants.BE_MINMEM],
3036 self.be_proposed[constants.BE_MAXMEM]),
3037 errors.ECODE_INVAL)
3039 delta = self.op.runtime_mem - current_memory
3040 if delta > 0:
3041 CheckNodeFreeMemory(
3042 self, self.instance.primary_node,
3043 "ballooning memory for instance %s" % self.instance.name, delta,
3044 self.instance.hypervisor,
3045 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3047 # make self.cluster visible in the functions below
3048 cluster = self.cluster
3050 def _PrepareNicCreate(_, params, private):
3051 self._PrepareNicModification(params, private, None, None,
3052 {}, cluster, pnode_uuid)
3053 return (None, None)
3055 def _PrepareNicMod(_, nic, params, private):
3056 self._PrepareNicModification(params, private, nic.ip, nic.network,
3057 nic.nicparams, cluster, pnode_uuid)
3058 return None
3060 def _PrepareNicRemove(_, params, __):
3061 ip = params.ip
3062 net = params.network
3063 if net is not None and ip is not None:
3064 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3066 # Verify NIC changes (operating on copy)
3067 nics = self.instance.nics[:]
3068 _ApplyContainerMods("NIC", nics, None, self.nicmod,
3069 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3070 if len(nics) > constants.MAX_NICS:
3071 raise errors.OpPrereqError("Instance has too many network interfaces"
3072 " (%d), cannot add more" % constants.MAX_NICS,
3075 # Pre-compute NIC changes (necessary to use result in hooks)
3076 self._nic_chgdesc = []
3077 if self.nicmod:
3078 # Operate on copies as this is still in prereq
3079 nics = [nic.Copy() for nic in self.instance.nics]
3080 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3081 self._CreateNewNic, self._ApplyNicMods,
3082 self._RemoveNic)
3083 # Verify that NIC names are unique and valid
3084 utils.ValidateDeviceNames("NIC", nics)
3085 self._new_nics = nics
3086 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3087 else:
3088 self._new_nics = None
3089 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3091 if not self.op.ignore_ipolicy:
3092 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3093 group_info)
3095 # Fill ispec with backend parameters
3096 ispec[constants.ISPEC_SPINDLE_USE] = \
3097 self.be_new.get(constants.BE_SPINDLE_USE, None)
3098 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3099 None)
3101 # Copy ispec to verify parameters with min/max values separately
3102 if self.op.disk_template:
3103 new_disk_template = self.op.disk_template
3104 else:
3105 new_disk_template = self.instance.disk_template
3106 ispec_max = ispec.copy()
3107 ispec_max[constants.ISPEC_MEM_SIZE] = \
3108 self.be_new.get(constants.BE_MAXMEM, None)
3109 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3110 new_disk_template)
3111 ispec_min = ispec.copy()
3112 ispec_min[constants.ISPEC_MEM_SIZE] = \
3113 self.be_new.get(constants.BE_MINMEM, None)
3114 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3115 new_disk_template)
3117 if (res_max or res_min):
3118 # FIXME: Improve error message by including information about whether
3119 # the upper or lower limit of the parameter fails the ipolicy.
3120 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3121 (group_info, group_info.name,
3122 utils.CommaJoin(set(res_max + res_min))))
3123 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3125 def _ConvertPlainToDrbd(self, feedback_fn):
3126 """Converts an instance from plain to drbd.
3129 feedback_fn("Converting template to drbd")
3130 pnode_uuid = self.instance.primary_node
3131 snode_uuid = self.op.remote_node_uuid
3133 assert self.instance.disk_template == constants.DT_PLAIN
3135 # create a fake disk info for _GenerateDiskTemplate
3136 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3137 constants.IDISK_VG: d.logical_id[0],
3138 constants.IDISK_NAME: d.name}
3139 for d in self.instance.disks]
3140 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3141 self.instance.uuid, pnode_uuid,
3142 [snode_uuid], disk_info, None, None, 0,
3143 feedback_fn, self.diskparams)
3144 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3145 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3146 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3147 info = GetInstanceInfoText(self.instance)
3148 feedback_fn("Creating additional volumes...")
3149 # first, create the missing data and meta devices
3150 for disk in anno_disks:
3151 # unfortunately this is... not too nice
3152 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3153 info, True, p_excl_stor)
3154 for child in disk.children:
3155 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3156 s_excl_stor)
3157 # at this stage, all new LVs have been created, we can rename the
3158 # old ones
3159 feedback_fn("Renaming original volumes...")
3160 rename_list = [(o, n.children[0].logical_id)
3161 for (o, n) in zip(self.instance.disks, new_disks)]
3162 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3163 result.Raise("Failed to rename original LVs")
3165 feedback_fn("Initializing DRBD devices...")
3166 # all child devices are in place, we can now create the DRBD devices
3167 try:
3168 for disk in anno_disks:
3169 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3170 (snode_uuid, s_excl_stor)]:
3171 f_create = node_uuid == pnode_uuid
3172 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3173 f_create, excl_stor)
3174 except errors.GenericError, e:
3175 feedback_fn("Initializing of DRBD devices failed;"
3176 " renaming back original volumes...")
3177 rename_back_list = [(n.children[0], o.logical_id)
3178 for (n, o) in zip(new_disks, self.instance.disks)]
3179 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3180 result.Raise("Failed to rename LVs back after error %s" % str(e))
3183 # at this point, the instance has been modified
3184 self.instance.disk_template = constants.DT_DRBD8
3185 self.instance.disks = new_disks
3186 self.cfg.Update(self.instance, feedback_fn)
3188 # Release node locks while waiting for sync
3189 ReleaseLocks(self, locking.LEVEL_NODE)
3191 # disks are created, waiting for sync
3192 disk_abort = not WaitForSync(self, self.instance,
3193 oneshot=not self.op.wait_for_sync)
3194 if disk_abort:
3195 raise errors.OpExecError("There are some degraded disks for"
3196 " this instance, please cleanup manually")
3198 # Node resource locks will be released by caller
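# Editorial sketch (not part of the original module): the conversion above
# turns each plain LV into the data child of a new DRBD8 device, roughly:
#
#   drbd8 disk
#   +-- data LV  (the renamed original plain LV)
#   +-- meta LV  (newly created)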
3200 def _ConvertDrbdToPlain(self, feedback_fn):
3201 """Converts an instance from drbd to plain.
3204 assert len(self.instance.secondary_nodes) == 1
3205 assert self.instance.disk_template == constants.DT_DRBD8
3207 pnode_uuid = self.instance.primary_node
3208 snode_uuid = self.instance.secondary_nodes[0]
3209 feedback_fn("Converting template to plain")
3211 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3212 new_disks = [d.children[0] for d in self.instance.disks]
3214 # copy over size, mode and name
3215 for parent, child in zip(old_disks, new_disks):
3216 child.size = parent.size
3217 child.mode = parent.mode
3218 child.name = parent.name
3220 # this is a DRBD disk, return its port to the pool
3221 # NOTE: this must be done right before the call to cfg.Update!
3222 for disk in old_disks:
3223 tcp_port = disk.logical_id[2]
3224 self.cfg.AddTcpUdpPort(tcp_port)
3226 # update instance structure
3227 self.instance.disks = new_disks
3228 self.instance.disk_template = constants.DT_PLAIN
3229 _UpdateIvNames(0, self.instance.disks)
3230 self.cfg.Update(self.instance, feedback_fn)
3232 # Release locks in case removing disks takes a while
3233 ReleaseLocks(self, locking.LEVEL_NODE)
3235 feedback_fn("Removing volumes on the secondary node...")
3236 for disk in old_disks:
3237 result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3238 result.Warn("Could not remove block device %s on node %s,"
3239 " continuing anyway" %
3240 (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3241 self.LogWarning)
3243 feedback_fn("Removing unneeded volumes on the primary node...")
3244 for idx, disk in enumerate(old_disks):
3245 meta = disk.children[1]
3246 result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3247 result.Warn("Could not remove metadata for disk %d on node %s,"
3248 " continuing anyway" %
3249 (idx, self.cfg.GetNodeName(pnode_uuid)),
3250 self.LogWarning)
3252 def _HotplugDevice(self, action, dev_type, device, extra, seq):
3253 self.LogInfo("Trying to hotplug device...")
3255 result = self.rpc.call_hotplug_device(self.instance.primary_node,
3256 self.instance, action, dev_type,
3257 (device, self.instance),
3260 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3261 self.LogInfo("Continuing execution..")
3264 self.LogInfo("Hotplug done.")
3268 def _CreateNewDisk(self, idx, params, _):
3269 """Creates a new disk.
3273 if self.instance.disk_template in constants.DTS_FILEBASED:
3274 (file_driver, file_path) = self.instance.disks[0].logical_id
3275 file_path = os.path.dirname(file_path)
3277 file_driver = file_path = None
3280 GenerateDiskTemplate(self, self.instance.disk_template,
3281 self.instance.uuid, self.instance.primary_node,
3282 self.instance.secondary_nodes, [params], file_path,
3283 file_driver, idx, self.Log, self.diskparams)[0]
3285 new_disks = CreateDisks(self, self.instance, disks=[disk])
3287 if self.cluster.prealloc_wipe_disks:
3288 # Wipe new disk
3289 WipeOrCleanupDisks(self, self.instance,
3290 disks=[(idx, disk, 0)],
3291 cleanup=new_disks)
3293 changes = [
3294 ("disk/%d" % idx,
3295 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3296 ]
3297 if self.op.hotplug:
3298 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3299 (disk, self.instance),
3300 self.instance.name, True, idx)
3301 if result.fail_msg:
3302 changes.append(("disk/%d" % idx, "assemble:failed"))
3303 self.LogWarning("Can't assemble newly created disk %d: %s",
3304 idx, result.fail_msg)
3305 else:
3306 _, link_name = result.payload
3307 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3308 constants.HOTPLUG_TARGET_DISK,
3309 disk, link_name, idx)
3310 changes.append(("disk/%d" % idx, msg))
3312 return (disk, changes)
3314 def _PostAddDisk(self, _, disk):
3315 if not WaitForSync(self, self.instance, disks=[disk],
3316 oneshot=not self.op.wait_for_sync):
3317 raise errors.OpExecError("Failed to sync disks of %s" %
3320 # the disk is active at this point, so deactivate it if the instance disks
3321 # are supposed to be inactive
3322 if not self.instance.disks_active:
3323 ShutdownInstanceDisks(self, self.instance, disks=[disk])
3325 @staticmethod
3326 def _ModifyDisk(idx, disk, params, _):
3327 """Modifies a disk.
3329 """
3330 changes = []
3331 mode = params.get(constants.IDISK_MODE, None)
3332 if mode:
3333 disk.mode = mode
3334 changes.append(("disk.mode/%d" % idx, disk.mode))
3336 name = params.get(constants.IDISK_NAME, None)
3337 disk.name = name
3338 changes.append(("disk.name/%d" % idx, disk.name))
3340 return changes
3342 def _RemoveDisk(self, idx, root, _):
3343 """Removes a disk.
3345 """
3346 hotmsg = ""
3347 if self.op.hotplug:
3348 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3349 constants.HOTPLUG_TARGET_DISK,
3350 root, None, idx)
3351 ShutdownInstanceDisks(self, self.instance, [root])
3353 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3354 for node_uuid, disk in anno_disk.ComputeNodeTree(
3355 self.instance.primary_node):
3356 msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3357 .fail_msg
3358 if msg:
3359 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3360 " continuing anyway", idx,
3361 self.cfg.GetNodeName(node_uuid), msg)
3363 # if this is a DRBD disk, return its port to the pool
3364 if root.dev_type in constants.DTS_DRBD:
3365 self.cfg.AddTcpUdpPort(root.logical_id[2])
3367 return hotmsg
3369 def _CreateNewNic(self, idx, params, private):
3370 """Creates data structure for a new network interface.
3373 mac = params[constants.INIC_MAC]
3374 ip = params.get(constants.INIC_IP, None)
3375 net = params.get(constants.INIC_NETWORK, None)
3376 name = params.get(constants.INIC_NAME, None)
3377 net_uuid = self.cfg.LookupNetwork(net)
3378 #TODO: not private.filled?? can a nic have no nicparams??
3379 nicparams = private.filled
3380 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3381 nicparams=nicparams)
3382 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3384 changes = [
3385 ("nic.%d" % idx,
3386 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3387 (mac, ip, private.filled[constants.NIC_MODE],
3388 private.filled[constants.NIC_LINK], net)),
3389 ]
3391 if self.op.hotplug:
3392 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3393 constants.HOTPLUG_TARGET_NIC,
3394 nobj, None, idx)
3395 changes.append(("nic.%d" % idx, msg))
3397 return (nobj, changes)
3399 def _ApplyNicMods(self, idx, nic, params, private):
3400 """Modifies a network interface.
3405 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3407 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3408 setattr(nic, key, params[key])
3410 new_net = params.get(constants.INIC_NETWORK, nic.network)
3411 new_net_uuid = self.cfg.LookupNetwork(new_net)
3412 if new_net_uuid != nic.network:
3413 changes.append(("nic.network/%d" % idx, new_net))
3414 nic.network = new_net_uuid
3416 if private.filled:
3417 nic.nicparams = private.filled
3419 for (key, val) in nic.nicparams.items():
3420 changes.append(("nic.%s/%d" % (key, idx), val))
3423 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3424 constants.HOTPLUG_TARGET_NIC,
3425 nic, None, idx)
3426 changes.append(("nic/%d" % idx, msg))
3430 def _RemoveNic(self, idx, nic, _):
3431 if self.op.hotplug:
3432 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3433 constants.HOTPLUG_TARGET_NIC,
3434 nic, None, idx)
3436 def Exec(self, feedback_fn):
3437 """Modifies an instance.
3439 All parameters take effect only at the next restart of the instance.
3441 """
3442 # Process here the warnings from CheckPrereq, as we don't have a
3443 # feedback_fn there.
3444 # TODO: Replace with self.LogWarning
3445 for warn in self.warn:
3446 feedback_fn("WARNING: %s" % warn)
3448 assert ((self.op.disk_template is None) ^
3449 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3450 "Not owning any node resource locks"
3455 if self.op.pnode_uuid:
3456 self.instance.primary_node = self.op.pnode_uuid
3458 # runtime memory
3459 if self.op.runtime_mem:
3460 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3461 self.instance,
3462 self.op.runtime_mem)
3463 rpcres.Raise("Cannot modify instance runtime memory")
3464 result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      check_nodes = set(self.instance.all_nodes)
      if self.op.remote_node_uuid:
        check_nodes.add(self.op.remote_node_uuid)
      for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
        owned = self.owned_locks(level)
        assert not (check_nodes - owned), \
          ("Not owning the correct locks, owning %r, expected at least %r" %
           (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already
    # have been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
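

# Illustrative sketch only: _FormatSetParamsResultExample is a hypothetical
# helper, not part of Ganeti.  The descriptors accumulated by
# LUInstanceSetParams.Exec() above are (path, value) pairs such as
# ("disk_template", "drbd") or ("be/maxmem", 512); a client could render
# them like this:
def _FormatSetParamsResultExample(changes):
  """Renders (path, value) change pairs, one "path -> value" per line."""
  return "\n".join("%s -> %s" % (path, value) for path, value in changes)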


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by the instance optimistically; this requires
        # going via the node before it's locked, requiring verification
        # later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids
    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
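

# Illustrative sketch, not upstream code: LoadNodeEvacResult parses the
# iallocator's node-evacuation-style answer into a list of jobs, each job
# itself a list of opcodes, which ResultWithJobs then submits to the job
# queue.  Assuming hypothetical instance/node names, a two-job result could
# look like:
#
#   jobs = [
#     [opcodes.OpInstanceMigrate(instance_name="inst1.example.com")],
#     [opcodes.OpInstanceReplaceDisks(instance_name="inst1.example.com",
#                                     mode=constants.REPLACE_DISK_CHG,
#                                     remote_node="node3.example.com")],
#     ]
#   return ResultWithJobs(jobs)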