# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
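
# Example (illustrative only, not from the original module): a callback may
# report changes such as [("mode", "bridged"), ("link", "br0")], i.e. a list
# of 2-tuples of (non-empty string, arbitrary value), which is exactly the
# shape the type description above accepts.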


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
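
# Example (illustrative): if "inst1" resolves to "inst1.example.com", the
# check above passes, since the given name is a prefix component of the
# resolved FQDN; a resolution to, say, "other.example.com" would raise
# OpPrereqError instead.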


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
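
# Example (illustrative): assuming cluster defaults of, say,
# {"maxmem": 512, "minmem": 512, "vcpus": 1}, an opcode carrying
# beparams={"maxmem": "auto", "vcpus": 2} leaves _ComputeFullBeParams with
# maxmem replaced by the cluster default (512), vcpus kept at 2, and every
# unspecified parameter filled in by SimpleFillBE.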


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
                                 " openvswitch", errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
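
# Example (illustrative): a NIC spec such as
#   {"mode": "auto", "ip": "pool", "network": "net1"}
# becomes an objects.NIC whose mode is taken from the cluster defaults and
# whose IP is left as "pool" for now; the actual address is only drawn from
# the network's pool in CheckPrereq, once the primary node is known.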


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
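
# Example (illustrative): an instance_spec of
#   {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 1,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [1024],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# checked against an ipolicy whose memory minimum is 1024 yields a non-empty
# violation list mentioning the memory-size bound.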


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
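
# Example (illustrative): OS names carry the variant after a "+", e.g.
# "debootstrap+default". For an OS that declares supported_variants, passing
# a bare "debootstrap" raises "OS name must include a variant"; for one that
# declares none, "debootstrap+default" is rejected instead.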


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get
      # from the iallocator, which wants the disk template as input. To solve
      # this chicken-and-egg problem, it should be possible to specify just a
      # node group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
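
  # Example (illustrative): adoption passes existing backing storage instead
  # of sizes, e.g. a disk spec of {"vg": "xenvg", "adopt": "existing-lv"} for
  # the plain template; mixing adopted and non-adopted disks in one request
  # is rejected above.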

  def _CheckVLANArguments(self):
    """Check validity of VLANs, if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
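
  # Example (illustrative, assuming the usual pathutils.EXPORT_DIR of
  # /srv/ganeti/export): a relative src_path of "inst1.example.com" is
  # resolved to /srv/ganeti/export/inst1.example.com once the source node
  # is known.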

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
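
  # Example (illustrative) of the export file layout consumed above, in
  # SerializableConfigParser form (section names per constants.INISECT_*):
  #
  #   [export]
  #   version = 0
  #   os = debootstrap+default
  #
  #   [instance]
  #   name = inst1.example.com
  #   disk0_size = 1024
  #   nic0_mac = aa:00:00:fa:3a:3f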

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
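
  # Example (illustrative): with a cluster file storage dir of
  # /srv/ganeti/file-storage, no file_storage_dir override, and an instance
  # named inst1.example.com, the computed directory is
  # /srv/ganeti/file-storage/inst1.example.com.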

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                              self.op.hvparams)
    CheckNodeFreeMemory(self, self.pnode.uuid,
                        "creating instance %s" % self.op.instance_name,
                        self.be_full[constants.BE_MAXMEM],
                        self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
        self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
        renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name
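
  # Example (illustrative): renaming old.example.com to new.example.com on a
  # file-based instance also moves the storage directory, e.g.
  # /srv/ganeti/file-storage/old.example.com to
  # /srv/ganeti/file-storage/new.example.com, via the
  # file_storage_dir_rename RPC above.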


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1660 class LUInstanceMove(LogicalUnit):
1661 """Move an instance by data-copying.
1664 HPATH = "instance-move"
1665 HTYPE = constants.HTYPE_INSTANCE
1668 def ExpandNames(self):
1669 self._ExpandAndLockInstance()
1670 (self.op.target_node_uuid, self.op.target_node) = \
1671 ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1672 self.op.target_node)
1673 self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1674 self.needed_locks[locking.LEVEL_NODE_RES] = []
1675 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1677 def DeclareLocks(self, level):
1678 if level == locking.LEVEL_NODE:
1679 self._LockInstancesNodes(primary_only=True)
1680 elif level == locking.LEVEL_NODE_RES:
1682 self.needed_locks[locking.LEVEL_NODE_RES] = \
1683 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1685 def BuildHooksEnv(self):
1688 This runs on master, primary and secondary nodes of the instance.
1692 "TARGET_NODE": self.op.target_node,
1693 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1695 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1698 def BuildHooksNodes(self):
1699 """Build hooks nodes.
1703 self.cfg.GetMasterNode(),
1704 self.instance.primary_node,
1705 self.op.target_node_uuid,
1709 def CheckPrereq(self):
1710 """Check prerequisites.
1712 This checks that the instance is in the cluster.
1715 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1716 assert self.instance is not None, \
1717 "Cannot retrieve locked instance %s" % self.op.instance_name
1719 if self.instance.disk_template not in constants.DTS_COPYABLE:
1720 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1721 self.instance.disk_template,
1724 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1725 assert target_node is not None, \
1726 "Cannot retrieve locked node %s" % self.op.target_node
1728 self.target_node_uuid = target_node.uuid
1729 if target_node.uuid == self.instance.primary_node:
1730 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1731 (self.instance.name, target_node.name),
1734 bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1736 for idx, dsk in enumerate(self.instance.disks):
1737 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1738 constants.DT_SHARED_FILE):
1739 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1740 " cannot copy" % idx, errors.ECODE_STATE)
1742 CheckNodeOnline(self, target_node.uuid)
1743 CheckNodeNotDrained(self, target_node.uuid)
1744 CheckNodeVmCapable(self, target_node.uuid)
1745 cluster = self.cfg.GetClusterInfo()
1746 group_info = self.cfg.GetNodeGroup(target_node.group)
1747 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1748 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1749 ignore=self.op.ignore_ipolicy)
1751 if self.instance.admin_state == constants.ADMINST_UP:
1752 # check memory requirements on the secondary node
1753 CheckNodeFreeMemory(
1754 self, target_node.uuid, "failing over instance %s" %
1755 self.instance.name, bep[constants.BE_MAXMEM],
1756 self.instance.hypervisor,
1757         self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1758     else:
1759       self.LogInfo("Not checking memory on the secondary node as"
1760 " instance will not be started")
1762     # check bridge existence
1763 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1765 def Exec(self, feedback_fn):
1766 """Move an instance.
1768 The move is done by shutting it down on its present node, copying
1769     the data over (slow) and starting it on the new node.
1771     """
1772 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1773 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1775 self.LogInfo("Shutting down instance %s on source node %s",
1776 self.instance.name, source_node.name)
1778 assert (self.owned_locks(locking.LEVEL_NODE) ==
1779 self.owned_locks(locking.LEVEL_NODE_RES))
1781     result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1782                                              self.op.shutdown_timeout,
1783                                              self.op.reason)
1784     if self.op.ignore_consistency:
1785       result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1786                   " anyway. Please make sure node %s is down. Error details" %
1787                   (self.instance.name, source_node.name, source_node.name),
1788                   self.LogWarning)
1789     else:
1790       result.Raise("Could not shutdown instance %s on node %s" %
1791                    (self.instance.name, source_node.name))
1793     # create the target disks
1794     try:
1795       CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1796     except errors.OpExecError:
1797       self.LogWarning("Device creation failed")
1798       self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1799       raise
1801     cluster_name = self.cfg.GetClusterInfo().cluster_name
1803     errs = []
1804     # activate, get path, copy the data over
1805 for idx, disk in enumerate(self.instance.disks):
1806 self.LogInfo("Copying data for disk %d", idx)
1807       result = self.rpc.call_blockdev_assemble(
1808                  target_node.uuid, (disk, self.instance), self.instance.name,
1809                  True, idx)
1810       if result.fail_msg:
1811         self.LogWarning("Can't assemble newly created disk %d: %s",
1812                         idx, result.fail_msg)
1813         errs.append(result.fail_msg)
1814       else:
1815         dev_path = result.payload
1816         result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1817                                                self.instance),
1818                                                target_node.name, dev_path,
1819                                                cluster_name)
1820         if result.fail_msg:
1821           self.LogWarning("Can't copy data over for disk %d: %s",
1822                           idx, result.fail_msg)
1823           errs.append(result.fail_msg)
1825     if errs:
1827       self.LogWarning("Some disks failed to copy, aborting")
1828       try:
1829         RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1830       finally:
1831         self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1832         raise errors.OpExecError("Errors during disk copy: %s" %
1833                                  ",".join(errs))
1835 self.instance.primary_node = target_node.uuid
1836 self.cfg.Update(self.instance, feedback_fn)
1838 self.LogInfo("Removing the disks on the original node")
1839 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1841 # Only start the instance if it's marked as up
1842 if self.instance.admin_state == constants.ADMINST_UP:
1843 self.LogInfo("Starting instance %s on node %s",
1844 self.instance.name, target_node.name)
1846       disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1847                                           ignore_secondaries=True)
1848       if not disks_ok:
1849         ShutdownInstanceDisks(self, self.instance)
1850         raise errors.OpExecError("Can't activate the instance's disks")
1852       result = self.rpc.call_instance_start(target_node.uuid,
1853                                             (self.instance, None, None), False,
1854                                             self.op.reason)
1855       msg = result.fail_msg
1856       if msg:
1857         ShutdownInstanceDisks(self, self.instance)
1858         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1859                                  (self.instance.name, target_node.name, msg))
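# Illustrative note (not part of the original source): this Exec body is what
# backs a user-driven move, typically invoked as
#
#   gnt-instance move -n target-node.example.com instance1.example.com
#
# i.e. shut down on the source node, blockdev_assemble/blockdev_export per
# disk, flip primary_node in the config, remove the source disks, and start
# the instance again if it was administratively up.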
1862 class LUInstanceMultiAlloc(NoHooksLU):
1863 """Allocates multiple instances at the same time.
1868 def CheckArguments(self):
1873 for inst in self.op.instances:
1874 if inst.iallocator is not None:
1875         raise errors.OpPrereqError("iallocator is not allowed to be set on"
1876                                    " instance objects", errors.ECODE_INVAL)
1877 nodes.append(bool(inst.pnode))
1878 if inst.disk_template in constants.DTS_INT_MIRROR:
1879 nodes.append(bool(inst.snode))
1881 has_nodes = compat.any(nodes)
1882 if compat.all(nodes) ^ has_nodes:
1883 raise errors.OpPrereqError("There are instance objects providing"
1884 " pnode/snode while others do not",
1887 if not has_nodes and self.op.iallocator is None:
1888 default_iallocator = self.cfg.GetDefaultIAllocator()
1889 if default_iallocator:
1890         self.op.iallocator = default_iallocator
1891       else:
1892         raise errors.OpPrereqError("No iallocator or nodes on the instances"
1893 " given and no cluster-wide default"
1894 " iallocator found; please specify either"
1895 " an iallocator or nodes on the instances"
1896 " or set a cluster-wide default iallocator",
1899 _CheckOpportunisticLocking(self.op)
1901     dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1902     if dups:
1903       raise errors.OpPrereqError("There are duplicate instance names: %s" %
1904 utils.CommaJoin(dups), errors.ECODE_INVAL)
1906 def ExpandNames(self):
1907 """Calculate the locks.
1910 self.share_locks = ShareAll()
1911 self.needed_locks = {
1912 # iallocator will select nodes and even if no iallocator is used,
1913 # collisions with LUInstanceCreate should be avoided
1914       locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1915       }
1917 if self.op.iallocator:
1918 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1919 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1921 if self.op.opportunistic_locking:
1922 self.opportunistic_locks[locking.LEVEL_NODE] = True
1923       self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1924     else:
1925       nodeslist = []
1926       for inst in self.op.instances:
1927 (inst.pnode_uuid, inst.pnode) = \
1928 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1929 nodeslist.append(inst.pnode_uuid)
1930 if inst.snode is not None:
1931 (inst.snode_uuid, inst.snode) = \
1932 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1933 nodeslist.append(inst.snode_uuid)
1935 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1936 # Lock resources of instance's primary and secondary nodes (copy to
1937     # prevent accidental modification)
1938 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1940 def CheckPrereq(self):
1941 """Check prerequisite.
1944 if self.op.iallocator:
1945 cluster = self.cfg.GetClusterInfo()
1946 default_vg = self.cfg.GetVGName()
1947 ec_id = self.proc.GetECId()
1949 if self.op.opportunistic_locking:
1950 # Only consider nodes for which a lock is held
1951 node_whitelist = self.cfg.GetNodeNames(
1952           list(self.owned_locks(locking.LEVEL_NODE)))
1953       else:
1954         node_whitelist = None
1956       insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1957                                            _ComputeNics(op, cluster, None,
1958                                                         self.cfg, ec_id),
1959                                            _ComputeFullBeParams(op, cluster),
1960                                            node_whitelist)
1961                for op in self.op.instances]
1963 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1964 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1966       ial.Run(self.op.iallocator)
1968       if not ial.success:
1969         raise errors.OpPrereqError("Can't compute nodes using"
1970 " iallocator '%s': %s" %
1971                                    (self.op.iallocator, ial.info),
1972                                    errors.ECODE_NORES)
1974       self.ia_result = ial.result
1976     if self.op.dry_run:
1977       self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1978         constants.JOB_IDS_KEY: [],
1979         })
1981 def _ConstructPartialResult(self):
1982 """Contructs the partial result.
1985 if self.op.iallocator:
1986 (allocatable, failed_insts) = self.ia_result
1987       allocatable_insts = map(compat.fst, allocatable)
1988     else:
1989       allocatable_insts = [op.instance_name for op in self.op.instances]
1990       failed_insts = []
1992     return {
1993       constants.ALLOCATABLE_KEY: allocatable_insts,
1994       constants.FAILED_KEY: failed_insts,
1995       }
1997 def Exec(self, feedback_fn):
1998 """Executes the opcode.
2002 if self.op.iallocator:
2003 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2004 (allocatable, failed) = self.ia_result
2006 for (name, node_names) in allocatable:
2007 op = op2inst.pop(name)
2009 (op.pnode_uuid, op.pnode) = \
2010 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2011 if len(node_names) > 1:
2012 (op.snode_uuid, op.snode) = \
2013             ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2015         jobs.append([op])
2017       missing = set(op2inst.keys()) - set(failed)
2018 assert not missing, \
2019 "Iallocator did return incomplete result: %s" % \
2020         utils.CommaJoin(missing)
2021     else:
2022       jobs.extend([op] for op in self.op.instances)
2024 return ResultWithJobs(jobs, **self._ConstructPartialResult())
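# Illustrative sketch (not part of the original source): the dictionary built
# by _ConstructPartialResult and merged into the job result has the shape
#
#   {constants.ALLOCATABLE_KEY: ["inst1.example.com", ...],
#    constants.FAILED_KEY: ["inst2.example.com", ...]}
#
# letting callers of OpInstanceMultiAlloc tell apart instances that could be
# placed from those the iallocator rejected.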
2027 class _InstNicModPrivate:
2028 """Data structure for network interface modifications.
2030 Used by L{LUInstanceSetParams}.
2038 def _PrepareContainerMods(mods, private_fn):
2039 """Prepares a list of container modifications by adding a private data field.
2041 @type mods: list of tuples; (operation, index, parameters)
2042 @param mods: List of modifications
2043 @type private_fn: callable or None
2044   @param private_fn: Callable for constructing a private data field for a
2045     modification
2046   @rtype: list
2048   """
2049   if private_fn is None:
2050     fn = lambda: None
2051   else:
2052     fn = private_fn
2054   return [(op, idx, params, fn()) for (op, idx, params) in mods]
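# Illustrative sketch (not part of the original source): given, say,
#
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}),
#           (constants.DDM_MODIFY, 0, {constants.IDISK_MODE: "rw"})]
#
# _PrepareContainerMods(mods, _InstNicModPrivate) returns the same tuples,
# each extended with a freshly constructed private object:
#
#   [(DDM_ADD, -1, {...}, <_InstNicModPrivate>),
#    (DDM_MODIFY, 0, {...}, <_InstNicModPrivate>)]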
2057 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2058 """Checks if nodes have enough physical CPUs
2060 This function checks if all given nodes have the needed number of
2061 physical CPUs. In case any node has less CPUs or we cannot get the
2062   information from the node, this function raises an OpPrereqError
2063   exception.
2065 @type lu: C{LogicalUnit}
2066 @param lu: a logical unit from which we get configuration data
2067 @type node_uuids: C{list}
2068 @param node_uuids: the list of node UUIDs to check
2069 @type requested: C{int}
2070 @param requested: the minimum acceptable number of physical CPUs
2071 @type hypervisor_specs: list of pairs (string, dict of strings)
2072 @param hypervisor_specs: list of hypervisor specifications in
2073 pairs (hypervisor_name, hvparams)
2074 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2075     or we cannot check the node
2077   """
2078   nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2079 for node_uuid in node_uuids:
2080 info = nodeinfo[node_uuid]
2081 node_name = lu.cfg.GetNodeName(node_uuid)
2082 info.Raise("Cannot get current information from node %s" % node_name,
2083 prereq=True, ecode=errors.ECODE_ENVIRON)
2084 (_, _, (hv_info, )) = info.payload
2085 num_cpus = hv_info.get("cpu_total", None)
2086 if not isinstance(num_cpus, int):
2087 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2088 " on node %s, result was '%s'" %
2089 (node_name, num_cpus), errors.ECODE_ENVIRON)
2090 if requested > num_cpus:
2091       raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2092                                  "required" % (node_name, num_cpus, requested),
2093                                  errors.ECODE_NORES)
2096 def GetItemFromContainer(identifier, kind, container):
2097 """Return the item refered by the identifier.
2099 @type identifier: string
2100 @param identifier: Item index or name or UUID
2102 @param kind: One-word item description
2103 @type container: list
2104 @param container: Container to get the item from
2108   try:
2109     idx = int(identifier)
2110     if idx == -1:
2111       # Append
2112       absidx = len(container) - 1
2113     elif idx < 0:
2114       raise IndexError("Not accepting negative indices other than -1")
2115     elif idx > len(container):
2116       raise IndexError("Got %s index %s, but there are only %s" %
2117                        (kind, idx, len(container)))
2118     else:
2119       absidx = idx
2120     return (absidx, container[idx])
2121   except ValueError:
2122     pass
2124   for idx, item in enumerate(container):
2125     if item.uuid == identifier or item.name == identifier:
2126       return (idx, item)
2128   raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2129                              (kind, identifier), errors.ECODE_NOENT)
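# Illustrative sketch (not part of the original source): resolution tries a
# numeric index first and falls back to UUID/name matching, e.g.
#
#   GetItemFromContainer("0", "disk", disks)    -> (0, disks[0])
#   GetItemFromContainer("-1", "disk", disks)   -> (len(disks) - 1, disks[-1])
#   GetItemFromContainer("data-a", "disk", disks)
#     -> first disk whose uuid or name equals "data-a"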
2132 def _ApplyContainerMods(kind, container, chgdesc, mods,
2133 create_fn, modify_fn, remove_fn):
2134 """Applies descriptions in C{mods} to C{container}.
2136   @type kind: string
2137   @param kind: One-word item description
2138 @type container: list
2139 @param container: Container to modify
2140 @type chgdesc: None or list
2141   @param chgdesc: List of applied changes
2142   @type mods: list
2143   @param mods: Modifications as returned by L{_PrepareContainerMods}
2144 @type create_fn: callable
2145 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2146 receives absolute item index, parameters and private data object as added
2147     by L{_PrepareContainerMods}, returns tuple containing new item and changes
2148     description
2149 @type modify_fn: callable
2150 @param modify_fn: Callback for modifying an existing item
2151 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2152     and private data object as added by L{_PrepareContainerMods}, returns
2153     changes description
2154   @type remove_fn: callable
2155 @param remove_fn: Callback on removing item; receives absolute item index,
2156     item and private data object as added by L{_PrepareContainerMods}
2158   """
2159   for (op, identifier, params, private) in mods:
2160     changes = None
2162 if op == constants.DDM_ADD:
2163 # Calculate where item will be added
2164 # When adding an item, identifier can only be an index
2165       try:
2166         idx = int(identifier)
2167       except ValueError:
2168         raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
2169                                    " identifier for %s" % constants.DDM_ADD,
2170                                    errors.ECODE_INVAL)
2171       if idx == -1:
2172         addidx = len(container)
2173       else:
2174         if idx < 0:
2175           raise IndexError("Not accepting negative indices other than -1")
2176         elif idx > len(container):
2177           raise IndexError("Got %s index %s, but there are only %s" %
2178                            (kind, idx, len(container)))
2179         addidx = idx
2181       if create_fn is None:
2182         item = params
2183       else:
2184         (item, changes) = create_fn(addidx, params, private)
2186       if idx == -1:
2187         container.append(item)
2188       else:
2190         assert idx <= len(container)
2191         # list.insert does so before the specified index
2192         container.insert(idx, item)
2193     else:
2194       # Retrieve existing item
2195       (absidx, item) = GetItemFromContainer(identifier, kind, container)
2197       if op == constants.DDM_REMOVE:
2198         assert not params
2200         if remove_fn is not None:
2201           remove_fn(absidx, item, private)
2203         changes = [("%s/%s" % (kind, absidx), "remove")]
2205         assert container[absidx] == item
2206         del container[absidx]
2207       elif op == constants.DDM_MODIFY:
2208         if modify_fn is not None:
2209           changes = modify_fn(absidx, item, params, private)
2210       else:
2211         raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2213 assert _TApplyContModsCbChanges(changes)
2215 if not (chgdesc is None or changes is None):
2216 chgdesc.extend(changes)
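# Illustrative sketch (not part of the original source): a typical invocation
# pairs this helper with _PrepareContainerMods, e.g.
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, 0, {})], None)
#   _ApplyContainerMods("NIC", nics, chgdesc, mods, None, None, remove_fn)
#
# after which nics[0] is gone and chgdesc holds entries such as
# ("NIC/0", "remove").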
2219 def _UpdateIvNames(base_index, disks):
2220 """Updates the C{iv_name} attribute of disks.
2222   @type disks: list of L{objects.Disk}
2224   """
2225   for (idx, disk) in enumerate(disks):
2226 disk.iv_name = "disk/%s" % (base_index + idx, )
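# Illustrative sketch (not part of the original source): after
#
#   _UpdateIvNames(0, instance.disks)
#
# the disks are named "disk/0", "disk/1", ... in list order; callers pass a
# non-zero base_index when only renaming the tail of the list.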
2229 class LUInstanceSetParams(LogicalUnit):
2230 """Modifies an instances's parameters.
2233 HPATH = "instance-modify"
2234 HTYPE = constants.HTYPE_INSTANCE
2238 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2239 assert ht.TList(mods)
2240 assert not mods or len(mods[0]) in (2, 3)
2242     if mods and len(mods[0]) == 2:
2243       result = []
2245       addremove = 0
2246       for op, params in mods:
2247         if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2248           result.append((op, -1, params))
2249           addremove += 1
2251           if addremove > 1:
2252             raise errors.OpPrereqError("Only one %s add or remove operation is"
2253                                        " supported at a time" % kind,
2254                                        errors.ECODE_INVAL)
2255         else:
2256           result.append((constants.DDM_MODIFY, op, params))
2258       assert verify_fn(result)
2259     else:
2260       result = mods
2262     return result
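# Illustrative sketch (not part of the original source): the legacy
# two-element form
#
#   [(constants.DDM_ADD, {...}), (0, {...})]
#
# is upgraded here to the indexed three-element form
#
#   [(constants.DDM_ADD, -1, {...}), (constants.DDM_MODIFY, 0, {...})]
#
# so the rest of LUInstanceSetParams only sees (op, identifier, params).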
2264   @staticmethod
2265   def _CheckMods(kind, mods, key_types, item_fn):
2266 """Ensures requested disk/NIC modifications are valid.
2269 for (op, _, params) in mods:
2270 assert ht.TDict(params)
2272       # If 'key_types' is an empty dict, we assume we have an
2273       # 'ext' template and thus do not ForceDictType
2274       if key_types:
2275         utils.ForceDictType(params, key_types)
2277       if op == constants.DDM_REMOVE:
2278         if params:
2279           raise errors.OpPrereqError("No settings should be passed when"
2280                                      " removing a %s" % kind,
2281                                      errors.ECODE_INVAL)
2282       elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2283         item_fn(op, params)
2284       else:
2285         raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2287   @staticmethod
2288   def _VerifyDiskModification(op, params, excl_stor):
2289     """Verifies a disk modification.
2291     """
2292 if op == constants.DDM_ADD:
2293 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2294       if mode not in constants.DISK_ACCESS_SET:
2295         raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2296                                    errors.ECODE_INVAL)
2298       size = params.get(constants.IDISK_SIZE, None)
2299       if size is None:
2300         raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2301                                    constants.IDISK_SIZE, errors.ECODE_INVAL)
2303       size = int(size)
2304       params[constants.IDISK_SIZE] = size
2305 name = params.get(constants.IDISK_NAME, None)
2306 if name is not None and name.lower() == constants.VALUE_NONE:
2307 params[constants.IDISK_NAME] = None
2309 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2311 elif op == constants.DDM_MODIFY:
2312       if constants.IDISK_SIZE in params:
2313         raise errors.OpPrereqError("Disk size change not possible, use"
2314                                    " grow-disk", errors.ECODE_INVAL)
2315       if len(params) > 2:
2316         raise errors.OpPrereqError("Disk modification doesn't support"
2317                                    " additional arbitrary parameters",
2318                                    errors.ECODE_INVAL)
2319       name = params.get(constants.IDISK_NAME, None)
2320 if name is not None and name.lower() == constants.VALUE_NONE:
2321 params[constants.IDISK_NAME] = None
2323   @staticmethod
2324   def _VerifyNicModification(op, params):
2325     """Verifies a network interface modification.
2327     """
2328 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2329 ip = params.get(constants.INIC_IP, None)
2330 name = params.get(constants.INIC_NAME, None)
2331 req_net = params.get(constants.INIC_NETWORK, None)
2332 link = params.get(constants.NIC_LINK, None)
2333 mode = params.get(constants.NIC_MODE, None)
2334 if name is not None and name.lower() == constants.VALUE_NONE:
2335 params[constants.INIC_NAME] = None
2336 if req_net is not None:
2337         if req_net.lower() == constants.VALUE_NONE:
2338           params[constants.INIC_NETWORK] = None
2339           req_net = None
2340         elif link is not None or mode is not None:
2341           raise errors.OpPrereqError("If a network is given, mode or link"
2342                                      " should not be set",
2343                                      errors.ECODE_INVAL)
2345     if op == constants.DDM_ADD:
2346       macaddr = params.get(constants.INIC_MAC, None)
2347       if macaddr is None:
2348         params[constants.INIC_MAC] = constants.VALUE_AUTO
2350     if ip is not None:
2351       if ip.lower() == constants.VALUE_NONE:
2352         params[constants.INIC_IP] = None
2353       else:
2354         if ip.lower() == constants.NIC_IP_POOL:
2355           if op == constants.DDM_ADD and req_net is None:
2356             raise errors.OpPrereqError("If ip=pool, parameter network"
2357                                        " cannot be none",
2358                                        errors.ECODE_INVAL)
2359         else:
2360           if not netutils.IPAddress.IsValid(ip):
2361             raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2362                                        errors.ECODE_INVAL)
2364 if constants.INIC_MAC in params:
2365 macaddr = params[constants.INIC_MAC]
2366 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2367 macaddr = utils.NormalizeAndValidateMac(macaddr)
2369     if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2370       raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2371                                  " modifying an existing NIC",
2372                                  errors.ECODE_INVAL)
2374 def CheckArguments(self):
2375 if not (self.op.nics or self.op.disks or self.op.disk_template or
2376 self.op.hvparams or self.op.beparams or self.op.os_name or
2377 self.op.osparams or self.op.offline is not None or
2378 self.op.runtime_mem or self.op.pnode):
2379 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2381 if self.op.hvparams:
2382 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2383 "hypervisor", "instance", "cluster")
2385 self.op.disks = self._UpgradeDiskNicMods(
2386 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2387 self.op.nics = self._UpgradeDiskNicMods(
2388 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2390 if self.op.disks and self.op.disk_template is not None:
2391       raise errors.OpPrereqError("Disk template conversion and other disk"
2392                                  " changes not supported at the same time",
2393                                  errors.ECODE_INVAL)
2395 if (self.op.disk_template and
2396 self.op.disk_template in constants.DTS_INT_MIRROR and
2397 self.op.remote_node is None):
2398 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2399 " one requires specifying a secondary node",
2402 # Check NIC modifications
2403 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2404                     self._VerifyNicModification)
2406     if self.op.pnode:
2407       (self.op.pnode_uuid, self.op.pnode) = \
2408         ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2410 def ExpandNames(self):
2411 self._ExpandAndLockInstance()
2412 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2413 # Can't even acquire node locks in shared mode as upcoming changes in
2414 # Ganeti 2.6 will start to modify the node object on disk conversion
2415 self.needed_locks[locking.LEVEL_NODE] = []
2416 self.needed_locks[locking.LEVEL_NODE_RES] = []
2417 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2418     # Look up the node group to get the ipolicy
2419 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2421 def DeclareLocks(self, level):
2422 if level == locking.LEVEL_NODEGROUP:
2423 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2424 # Acquire locks for the instance's nodegroups optimistically. Needs
2425 # to be verified in CheckPrereq
2426 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2427 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2428 elif level == locking.LEVEL_NODE:
2429 self._LockInstancesNodes()
2430 if self.op.disk_template and self.op.remote_node:
2431 (self.op.remote_node_uuid, self.op.remote_node) = \
2432 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2433 self.op.remote_node)
2434 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2435 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2437 self.needed_locks[locking.LEVEL_NODE_RES] = \
2438 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2440   def BuildHooksEnv(self):
2441     """Build hooks env.
2443     This runs on the master, primary and secondaries.
2445     """
2446     args = {}
2447     if constants.BE_MINMEM in self.be_new:
2448 args["minmem"] = self.be_new[constants.BE_MINMEM]
2449 if constants.BE_MAXMEM in self.be_new:
2450 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2451 if constants.BE_VCPUS in self.be_new:
2452 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2453 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2454 # information at all.
2456     if self._new_nics is not None:
2457       nics = []
2459       for nic in self._new_nics:
2460         n = copy.deepcopy(nic)
2461         nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2462         n.nicparams = nicparams
2463         nics.append(NICToTuple(self, n))
2465       args["nics"] = nics
2467 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2468 if self.op.disk_template:
2469 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2470 if self.op.runtime_mem:
2471       env["RUNTIME_MEMORY"] = self.op.runtime_mem
2473     return env
2475 def BuildHooksNodes(self):
2476 """Build hooks nodes.
2479 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2482 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2483 old_params, cluster, pnode_uuid):
2485     update_params_dict = dict([(key, params[key])
2486                                for key in constants.NICS_PARAMETERS
2487                                if key in params])
2489     req_link = update_params_dict.get(constants.NIC_LINK, None)
2490     req_mode = update_params_dict.get(constants.NIC_MODE, None)
2492     new_net_uuid = None
2493     new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2494     if new_net_uuid_or_name:
2495       new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2496       new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2498     if old_net_uuid:
2499       old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2501     if new_net_uuid:
2502       netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2503       if not netparams:
2504         raise errors.OpPrereqError("No netparams found for the network"
2505 " %s, probably not connected" %
2506 new_net_obj.name, errors.ECODE_INVAL)
2507       new_params = dict(netparams)
2508     else:
2509       new_params = GetUpdatedParams(old_params, update_params_dict)
2511 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2513 new_filled_params = cluster.SimpleFillNIC(new_params)
2514 objects.NIC.CheckParameterSyntax(new_filled_params)
2516 new_mode = new_filled_params[constants.NIC_MODE]
2517 if new_mode == constants.NIC_MODE_BRIDGED:
2518 bridge = new_filled_params[constants.NIC_LINK]
2519       msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2520       if msg:
2521         msg = "Error checking bridges on node '%s': %s" % \
2522                 (self.cfg.GetNodeName(pnode_uuid), msg)
2523         if self.op.force:
2524           self.warn.append(msg)
2525         else:
2526           raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2528 elif new_mode == constants.NIC_MODE_ROUTED:
2529       ip = params.get(constants.INIC_IP, old_ip)
2530       if ip is None:
2531         raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2532                                    " on a routed NIC", errors.ECODE_INVAL)
2534 elif new_mode == constants.NIC_MODE_OVS:
2535 # TODO: check OVS link
2536 self.LogInfo("OVS links are currently not checked for correctness")
2538 if constants.INIC_MAC in params:
2539       mac = params[constants.INIC_MAC]
2540       if mac is None:
2541         raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2542                                    errors.ECODE_INVAL)
2543       elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2544 # otherwise generate the MAC address
2545 params[constants.INIC_MAC] = \
2546           self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2547       else:
2548         # or validate/reserve the current one
2549         try:
2550           self.cfg.ReserveMAC(mac, self.proc.GetECId())
2551 except errors.ReservationError:
2552 raise errors.OpPrereqError("MAC address '%s' already in use"
2553 " in cluster" % mac,
2554 errors.ECODE_NOTUNIQUE)
2555 elif new_net_uuid != old_net_uuid:
2557       def get_net_prefix(net_uuid):
2558         mac_prefix = None
2559         if net_uuid:
2560           nobj = self.cfg.GetNetwork(net_uuid)
2561           mac_prefix = nobj.mac_prefix
2563         return mac_prefix
2565       new_prefix = get_net_prefix(new_net_uuid)
2566       old_prefix = get_net_prefix(old_net_uuid)
2567 if old_prefix != new_prefix:
2568 params[constants.INIC_MAC] = \
2569 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2571 # if there is a change in (ip, network) tuple
2572 new_ip = params.get(constants.INIC_IP, old_ip)
2573       if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2574         if new_ip:
2575           # if IP is pool then require a network and generate one IP
2576           if new_ip.lower() == constants.NIC_IP_POOL:
2577             if new_net_uuid:
2578               try:
2579                 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2580               except errors.ReservationError:
2581                 raise errors.OpPrereqError("Unable to get a free IP"
2582                                            " from the address pool",
2583                                            errors.ECODE_NORES)
2584               self.LogInfo("Chose IP %s from network %s",
2585                            new_ip,
2586                            new_net_obj.name)
2587               params[constants.INIC_IP] = new_ip
2588             else:
2589               raise errors.OpPrereqError("ip=pool, but no network found",
2590                                          errors.ECODE_INVAL)
2591           # Reserve new IP if in the new network if any
2592           elif new_net_uuid:
2593             try:
2594               self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2595 self.LogInfo("Reserving IP %s in network %s",
2596 new_ip, new_net_obj.name)
2597 except errors.ReservationError:
2598 raise errors.OpPrereqError("IP %s not available in network %s" %
2599 (new_ip, new_net_obj.name),
2600 errors.ECODE_NOTUNIQUE)
2601 # new network is None so check if new IP is a conflicting IP
2602 elif self.op.conflicts_check:
2603 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2605 # release old IP if old network is not None
2606     if old_ip and old_net_uuid:
2607       try:
2608         self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2609 except errors.AddressPoolError:
2610 logging.warning("Release IP %s not contained in network %s",
2611 old_ip, old_net_obj.name)
2613 # there are no changes in (ip, network) tuple and old network is not None
2614 elif (old_net_uuid is not None and
2615 (req_link is not None or req_mode is not None)):
2616       raise errors.OpPrereqError("Not allowed to change link or mode of"
2617                                  " a NIC that is connected to a network",
2618                                  errors.ECODE_INVAL)
2620 private.params = new_params
2621 private.filled = new_filled_params
2623 def _PreCheckDiskTemplate(self, pnode_info):
2624 """CheckPrereq checks related to a new disk template."""
2625 # Arguments are passed to avoid configuration lookups
2626 pnode_uuid = self.instance.primary_node
2627 if self.instance.disk_template == self.op.disk_template:
2628       raise errors.OpPrereqError("Instance already has disk template %s" %
2629                                  self.instance.disk_template,
2630                                  errors.ECODE_INVAL)
2632 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2633 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2634 " cluster." % self.op.disk_template)
2636 if (self.instance.disk_template,
2637 self.op.disk_template) not in self._DISK_CONVERSIONS:
2638       raise errors.OpPrereqError("Unsupported disk template conversion from"
2639                                  " %s to %s" % (self.instance.disk_template,
2640                                                 self.op.disk_template),
2641                                  errors.ECODE_INVAL)
2642 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2643 msg="cannot change disk template")
2644 if self.op.disk_template in constants.DTS_INT_MIRROR:
2645 if self.op.remote_node_uuid == pnode_uuid:
2646 raise errors.OpPrereqError("Given new secondary node %s is the same"
2647 " as the primary node of the instance" %
2648 self.op.remote_node, errors.ECODE_STATE)
2649 CheckNodeOnline(self, self.op.remote_node_uuid)
2650 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2651 # FIXME: here we assume that the old instance type is DT_PLAIN
2652 assert self.instance.disk_template == constants.DT_PLAIN
2653 disks = [{constants.IDISK_SIZE: d.size,
2654 constants.IDISK_VG: d.logical_id[0]}
2655 for d in self.instance.disks]
2656 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2657 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2659 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2660 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2661       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2662                                                               snode_group)
2663       CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2664 ignore=self.op.ignore_ipolicy)
2665 if pnode_info.group != snode_info.group:
2666 self.LogWarning("The primary and secondary nodes are in two"
2667 " different node groups; the disk parameters"
2668 " from the first disk's node group will be"
2671 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2672 # Make sure none of the nodes require exclusive storage
2673 nodes = [pnode_info]
2674       if self.op.disk_template in constants.DTS_INT_MIRROR:
2675         assert snode_info
2676         nodes.append(snode_info)
2677 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2678 if compat.any(map(has_es, nodes)):
2679 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2680 " storage is enabled" % (self.instance.disk_template,
2681 self.op.disk_template))
2682 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2684 def _PreCheckDisks(self, ispec):
2685 """CheckPrereq checks related to disk changes.
2688 @param ispec: instance specs to be updated with the new disks
2691 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2693     excl_stor = compat.any(
2694       rpc.GetExclusiveStorageForNodes(self.cfg,
2695                                       self.instance.all_nodes).values()
2696       )
2698 # Check disk modifications. This is done here and not in CheckArguments
2699 # (as with NICs), because we need to know the instance's disk template
2700 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2701     if self.instance.disk_template == constants.DT_EXT:
2702       self._CheckMods("disk", self.op.disks, {}, ver_fn)
2703     else:
2704       self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2705                       ver_fn)
2707 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2709 # Check the validity of the `provider' parameter
2710 if self.instance.disk_template in constants.DT_EXT:
2711 for mod in self.diskmod:
2712 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2713         if mod[0] == constants.DDM_ADD:
2714           if ext_provider is None:
2715             raise errors.OpPrereqError("Instance template is '%s' and parameter"
2716                                        " '%s' missing, during disk add" %
2717                                        (constants.DT_EXT,
2718                                         constants.IDISK_PROVIDER),
2719                                        errors.ECODE_NOENT)
2720         elif mod[0] == constants.DDM_MODIFY:
2721           if ext_provider:
2722             raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2723                                        " modification" %
2724                                        constants.IDISK_PROVIDER,
2725                                        errors.ECODE_INVAL)
2726     else:
2727       for mod in self.diskmod:
2728         ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2729         if ext_provider is not None:
2730           raise errors.OpPrereqError("Parameter '%s' is only valid for"
2731                                      " instances of type '%s'" %
2732                                      (constants.IDISK_PROVIDER,
2733                                       constants.DT_EXT),
2734                                      errors.ECODE_INVAL)
2736 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2737 raise errors.OpPrereqError("Disk operations not supported for"
2738 " diskless instances", errors.ECODE_INVAL)
2740 def _PrepareDiskMod(_, disk, params, __):
2741 disk.name = params.get(constants.IDISK_NAME, None)
2743 # Verify disk changes (operating on a copy)
2744 disks = copy.deepcopy(self.instance.disks)
2745 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2746 _PrepareDiskMod, None)
2747 utils.ValidateDeviceNames("disk", disks)
2748 if len(disks) > constants.MAX_DISKS:
2749       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2750                                  " more" % constants.MAX_DISKS,
2751                                  errors.ECODE_STATE)
2752 disk_sizes = [disk.size for disk in self.instance.disks]
2753 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2754 self.diskmod if op == constants.DDM_ADD)
2755 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2756 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2758 if self.op.offline is not None and self.op.offline:
2759 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2760 msg="can't change to offline")
2762 def CheckPrereq(self):
2763 """Check prerequisites.
2765 This only checks the instance list against the existing names.
2768 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2769 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2770 self.cluster = self.cfg.GetClusterInfo()
2772 assert self.instance is not None, \
2773 "Cannot retrieve locked instance %s" % self.op.instance_name
2775     pnode_uuid = self.instance.primary_node
2777     self.warn = []
2779     if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2780         not self.op.force):
2781 # verify that the instance is not up
2782 instance_info = self.rpc.call_instance_info(
2783 pnode_uuid, self.instance.name, self.instance.hypervisor,
2784 self.instance.hvparams)
2785 if instance_info.fail_msg:
2786 self.warn.append("Can't get instance runtime information: %s" %
2787 instance_info.fail_msg)
2788 elif instance_info.payload:
2789         raise errors.OpPrereqError("Instance is still running on %s" %
2790                                    self.cfg.GetNodeName(pnode_uuid),
2791                                    errors.ECODE_STATE)
2793 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2794 node_uuids = list(self.instance.all_nodes)
2795 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2797 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2798 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2799 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2801     # dictionary with instance information after the modification
2802     ispec = {}
2804 # Prepare NIC modifications
2805 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2808 if self.op.os_name and not self.op.force:
2809 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2810 self.op.force_variant)
2811       instance_os = self.op.os_name
2812     else:
2813       instance_os = self.instance.os
2815 assert not (self.op.disk_template and self.op.disks), \
2816 "Can't modify disk template and apply disk changes at the same time"
2818 if self.op.disk_template:
2819 self._PreCheckDiskTemplate(pnode_info)
2821 self._PreCheckDisks(ispec)
2823 # hvparams processing
2824 if self.op.hvparams:
2825 hv_type = self.instance.hypervisor
2826 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2827 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2828 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2831 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2832 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2833 self.hv_proposed = self.hv_new = hv_new # the new actual values
2834       self.hv_inst = i_hvdict # the new dict (without defaults)
2835     else:
2836       self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2837                                                    self.instance.os,
2838                                                    self.instance.hvparams)
2839 self.hv_new = self.hv_inst = {}
2841 # beparams processing
2842 if self.op.beparams:
2843       i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2844                                   use_none=True)
2845       objects.UpgradeBeParams(i_bedict)
2846 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2847 be_new = self.cluster.SimpleFillBE(i_bedict)
2848 self.be_proposed = self.be_new = be_new # the new actual values
2849       self.be_inst = i_bedict # the new dict (without defaults)
2850     else:
2851       self.be_new = self.be_inst = {}
2852 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2853 be_old = self.cluster.FillBE(self.instance)
2855 # CPU param validation -- checking every time a parameter is
2856     # changed to cover all cases where either CPU mask or vcpus have
2857     # been changed
2858     if (constants.BE_VCPUS in self.be_proposed and
2859         constants.HV_CPU_MASK in self.hv_proposed):
2860       cpu_list = \
2861         utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2862 # Verify mask is consistent with number of vCPUs. Can skip this
2863 # test if only 1 entry in the CPU mask, which means same mask
2864 # is applied to all vCPUs.
2865 if (len(cpu_list) > 1 and
2866 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2867         raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2868                                    " CPU mask [%s]" %
2869                                    (self.be_proposed[constants.BE_VCPUS],
2870                                     self.hv_proposed[constants.HV_CPU_MASK]),
2871                                    errors.ECODE_INVAL)
2873 # Only perform this test if a new CPU mask is given
2874 if constants.HV_CPU_MASK in self.hv_new:
2875 # Calculate the largest CPU number requested
2876 max_requested_cpu = max(map(max, cpu_list))
2877 # Check that all of the instance's nodes have enough physical CPUs to
2878 # satisfy the requested CPU mask
2879 hvspecs = [(self.instance.hypervisor,
2880 self.cfg.GetClusterInfo()
2881 .hvparams[self.instance.hypervisor])]
2882         _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2883                                 max_requested_cpu + 1,
2884                                 hvspecs)
2886 # osparams processing
2887 if self.op.osparams:
2888 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2889 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2890       self.os_inst = i_osdict # the new dict (without defaults)
2891     else:
2892       self.os_inst = {}
2894 #TODO(dynmem): do the appropriate check involving MINMEM
2895 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2896 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2897 mem_check_list = [pnode_uuid]
2898 if be_new[constants.BE_AUTO_BALANCE]:
2899 # either we changed auto_balance to yes or it was from before
2900 mem_check_list.extend(self.instance.secondary_nodes)
2901 instance_info = self.rpc.call_instance_info(
2902 pnode_uuid, self.instance.name, self.instance.hypervisor,
2903 self.instance.hvparams)
2904 hvspecs = [(self.instance.hypervisor,
2905 self.cluster.hvparams[self.instance.hypervisor])]
2906       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2907                                          hvspecs)
2908       pninfo = nodeinfo[pnode_uuid]
2909       msg = pninfo.fail_msg
2910       if msg:
2911 # Assume the primary node is unreachable and go ahead
2912 self.warn.append("Can't get info from primary node %s: %s" %
2913                          (self.cfg.GetNodeName(pnode_uuid), msg))
2914       else:
2915         (_, _, (pnhvinfo, )) = pninfo.payload
2916 if not isinstance(pnhvinfo.get("memory_free", None), int):
2917 self.warn.append("Node data from primary node %s doesn't contain"
2918 " free memory information" %
2919 self.cfg.GetNodeName(pnode_uuid))
2920 elif instance_info.fail_msg:
2921 self.warn.append("Can't get instance runtime information: %s" %
2922                            instance_info.fail_msg)
2923         else:
2924           if instance_info.payload:
2925             current_mem = int(instance_info.payload["memory"])
2926           else:
2927             # Assume instance not running
2928             # (there is a slight race condition here, but it's not very
2929             # probable, and we have no other way to check)
2930             # TODO: Describe race condition
2931             current_mem = 0
2932           #TODO(dynmem): do the appropriate check involving MINMEM
2933           miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2934                       pnhvinfo["memory_free"])
2935           if miss_mem > 0:
2936             raise errors.OpPrereqError("This change will prevent the instance"
2937 " from starting, due to %d MB of memory"
2938 " missing on its primary node" %
2939 miss_mem, errors.ECODE_NORES)
2941 if be_new[constants.BE_AUTO_BALANCE]:
2942 for node_uuid, nres in nodeinfo.items():
2943           if node_uuid not in self.instance.secondary_nodes:
2944             continue
2945           nres.Raise("Can't get info from secondary node %s" %
2946 self.cfg.GetNodeName(node_uuid), prereq=True,
2947 ecode=errors.ECODE_STATE)
2948 (_, _, (nhvinfo, )) = nres.payload
2949 if not isinstance(nhvinfo.get("memory_free", None), int):
2950 raise errors.OpPrereqError("Secondary node %s didn't return free"
2951 " memory information" %
2952                                        self.cfg.GetNodeName(node_uuid),
2953                                        errors.ECODE_STATE)
2954           #TODO(dynmem): do the appropriate check involving MINMEM
2955 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2956 raise errors.OpPrereqError("This change will prevent the instance"
2957 " from failover to its secondary node"
2958 " %s, due to not enough memory" %
2959 self.cfg.GetNodeName(node_uuid),
2962 if self.op.runtime_mem:
2963 remote_info = self.rpc.call_instance_info(
2964 self.instance.primary_node, self.instance.name,
2965 self.instance.hypervisor,
2966 self.cluster.hvparams[self.instance.hypervisor])
2967 remote_info.Raise("Error checking node %s" %
2968 self.cfg.GetNodeName(self.instance.primary_node))
2969 if not remote_info.payload: # not running already
2970 raise errors.OpPrereqError("Instance %s is not running" %
2971 self.instance.name, errors.ECODE_STATE)
2973 current_memory = remote_info.payload["memory"]
2974 if (not self.op.force and
2975 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2976 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2977 raise errors.OpPrereqError("Instance %s must have memory between %d"
2978 " and %d MB of memory unless --force is"
2980 (self.instance.name,
2981 self.be_proposed[constants.BE_MINMEM],
2982 self.be_proposed[constants.BE_MAXMEM]),
2985       delta = self.op.runtime_mem - current_memory
2986       if delta > 0:
2987         CheckNodeFreeMemory(
2988 self, self.instance.primary_node,
2989 "ballooning memory for instance %s" % self.instance.name, delta,
2990 self.instance.hypervisor,
2991 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
2993 # make self.cluster visible in the functions below
2994 cluster = self.cluster
2996 def _PrepareNicCreate(_, params, private):
2997 self._PrepareNicModification(params, private, None, None,
2998                                    {}, cluster, pnode_uuid)
2999       return (None, None)
3001     def _PrepareNicMod(_, nic, params, private):
3002       self._PrepareNicModification(params, private, nic.ip, nic.network,
3003                                    nic.nicparams, cluster, pnode_uuid)
3004       return None
3006     def _PrepareNicRemove(_, params, __):
3007       ip = params.ip
3008 net = params.network
3009 if net is not None and ip is not None:
3010 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3012 # Verify NIC changes (operating on copy)
3013 nics = self.instance.nics[:]
3014 _ApplyContainerMods("NIC", nics, None, self.nicmod,
3015 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3016 if len(nics) > constants.MAX_NICS:
3017       raise errors.OpPrereqError("Instance has too many network interfaces"
3018                                  " (%d), cannot add more" % constants.MAX_NICS,
3019                                  errors.ECODE_STATE)
3021 # Pre-compute NIC changes (necessary to use result in hooks)
3022     self._nic_chgdesc = []
3023     if self.nicmod:
3024       # Operate on copies as this is still in prereq
3025 nics = [nic.Copy() for nic in self.instance.nics]
3026 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3027 self._CreateNewNic, self._ApplyNicMods, None)
3028 # Verify that NIC names are unique and valid
3029 utils.ValidateDeviceNames("NIC", nics)
3030 self._new_nics = nics
3031       ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3032     else:
3033       self._new_nics = None
3034 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3036 if not self.op.ignore_ipolicy:
3037       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3038                                                               group_info)
3040 # Fill ispec with backend parameters
3041 ispec[constants.ISPEC_SPINDLE_USE] = \
3042 self.be_new.get(constants.BE_SPINDLE_USE, None)
3043       ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3044                                                          None)
3046 # Copy ispec to verify parameters with min/max values separately
3047 if self.op.disk_template:
3048 new_disk_template = self.op.disk_template
3050 new_disk_template = self.instance.disk_template
3051 ispec_max = ispec.copy()
3052 ispec_max[constants.ISPEC_MEM_SIZE] = \
3053 self.be_new.get(constants.BE_MAXMEM, None)
3054       res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3055                                                      new_disk_template)
3056 ispec_min = ispec.copy()
3057 ispec_min[constants.ISPEC_MEM_SIZE] = \
3058 self.be_new.get(constants.BE_MINMEM, None)
3059       res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3060                                                      new_disk_template)
3062 if (res_max or res_min):
3063 # FIXME: Improve error message by including information about whether
3064 # the upper or lower limit of the parameter fails the ipolicy.
3065 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3066 (group_info, group_info.name,
3067 utils.CommaJoin(set(res_max + res_min))))
3068 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3070 def _ConvertPlainToDrbd(self, feedback_fn):
3071 """Converts an instance from plain to drbd.
3074 feedback_fn("Converting template to drbd")
3075 pnode_uuid = self.instance.primary_node
3076 snode_uuid = self.op.remote_node_uuid
3078 assert self.instance.disk_template == constants.DT_PLAIN
3080 # create a fake disk info for _GenerateDiskTemplate
3081 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3082 constants.IDISK_VG: d.logical_id[0],
3083 constants.IDISK_NAME: d.name}
3084 for d in self.instance.disks]
3085 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3086 self.instance.uuid, pnode_uuid,
3087 [snode_uuid], disk_info, None, None, 0,
3088 feedback_fn, self.diskparams)
3089     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3090                                         self.diskparams)
3091     p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3092 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3093 info = GetInstanceInfoText(self.instance)
3094 feedback_fn("Creating additional volumes...")
3095 # first, create the missing data and meta devices
3096 for disk in anno_disks:
3097 # unfortunately this is... not too nice
3098 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3099 info, True, p_excl_stor)
3100 for child in disk.children:
3101         CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3102                              s_excl_stor)
3103     # at this stage, all new LVs have been created, we can rename the
3104     # old ones
3105     feedback_fn("Renaming original volumes...")
3106 rename_list = [(o, n.children[0].logical_id)
3107 for (o, n) in zip(self.instance.disks, new_disks)]
3108 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3109 result.Raise("Failed to rename original LVs")
3111 feedback_fn("Initializing DRBD devices...")
3112 # all child devices are in place, we can now create the DRBD devices
3114 for disk in anno_disks:
3115 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3116 (snode_uuid, s_excl_stor)]:
3117 f_create = node_uuid == pnode_uuid
3118 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3119 f_create, excl_stor)
3120 except errors.GenericError, e:
3121 feedback_fn("Initializing of DRBD devices failed;"
3122 " renaming back original volumes...")
3123 for disk in new_disks:
3124 self.cfg.SetDiskID(disk, pnode_uuid)
3125 rename_back_list = [(n.children[0], o.logical_id)
3126 for (n, o) in zip(new_disks, self.instance.disks)]
3127 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3128       result.Raise("Failed to rename LVs back after error %s" % str(e))
3129       raise
3131     # at this point, the instance has been modified
3132 self.instance.disk_template = constants.DT_DRBD8
3133 self.instance.disks = new_disks
3134 self.cfg.Update(self.instance, feedback_fn)
3136 # Release node locks while waiting for sync
3137 ReleaseLocks(self, locking.LEVEL_NODE)
3139 # disks are created, waiting for sync
3140     disk_abort = not WaitForSync(self, self.instance,
3141                                  oneshot=not self.op.wait_for_sync)
3142     if disk_abort:
3143       raise errors.OpExecError("There are some degraded disks for"
3144 " this instance, please cleanup manually")
3146 # Node resource locks will be released by caller
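# Illustrative summary (not part of the original source) of the conversion
# above: generate a DRBD8 template of matching sizes, create the missing
# data/meta volumes, rename the original LVs into the new DRBD children,
# create the DRBD devices on both nodes (renaming back on failure), update
# the configuration, and finally wait for the initial sync unless
# wait_for_sync was disabled in the opcode.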
3148 def _ConvertDrbdToPlain(self, feedback_fn):
3149 """Converts an instance from drbd to plain.
3152 assert len(self.instance.secondary_nodes) == 1
3153 assert self.instance.disk_template == constants.DT_DRBD8
3155 pnode_uuid = self.instance.primary_node
3156 snode_uuid = self.instance.secondary_nodes[0]
3157 feedback_fn("Converting template to plain")
3159 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3160 new_disks = [d.children[0] for d in self.instance.disks]
3162 # copy over size, mode and name
3163 for parent, child in zip(old_disks, new_disks):
3164 child.size = parent.size
3165 child.mode = parent.mode
3166 child.name = parent.name
3168 # this is a DRBD disk, return its port to the pool
3169 # NOTE: this must be done right before the call to cfg.Update!
3170 for disk in old_disks:
3171 tcp_port = disk.logical_id[2]
3172 self.cfg.AddTcpUdpPort(tcp_port)
3174 # update instance structure
3175 self.instance.disks = new_disks
3176 self.instance.disk_template = constants.DT_PLAIN
3177 _UpdateIvNames(0, self.instance.disks)
3178 self.cfg.Update(self.instance, feedback_fn)
3180 # Release locks in case removing disks takes a while
3181 ReleaseLocks(self, locking.LEVEL_NODE)
3183 feedback_fn("Removing volumes on the secondary node...")
3184 for disk in old_disks:
3185 self.cfg.SetDiskID(disk, snode_uuid)
3186 result = self.rpc.call_blockdev_remove(snode_uuid, disk)
3187       result.Warn("Could not remove block device %s on node %s,"
3188                   " continuing anyway" %
3189                   (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3190                   self.LogWarning)
3192 feedback_fn("Removing unneeded volumes on the primary node...")
3193 for idx, disk in enumerate(old_disks):
3194 meta = disk.children[1]
3195 self.cfg.SetDiskID(meta, pnode_uuid)
3196 result = self.rpc.call_blockdev_remove(pnode_uuid, meta)
3197       result.Warn("Could not remove metadata for disk %d on node %s,"
3198                   " continuing anyway" %
3199                   (idx, self.cfg.GetNodeName(pnode_uuid)),
3200                   self.LogWarning)
3202 def _CreateNewDisk(self, idx, params, _):
3203 """Creates a new disk.
3207 if self.instance.disk_template in constants.DTS_FILEBASED:
3208 (file_driver, file_path) = self.instance.disks[0].logical_id
3209 file_path = os.path.dirname(file_path)
3211       file_driver = file_path = None
3213     disk = \
3214       GenerateDiskTemplate(self, self.instance.disk_template,
3215 self.instance.uuid, self.instance.primary_node,
3216 self.instance.secondary_nodes, [params], file_path,
3217 file_driver, idx, self.Log, self.diskparams)[0]
3219     new_disks = CreateDisks(self, self.instance, disks=[disk])
3221     if self.cluster.prealloc_wipe_disks:
3222       # Wipe new disk
3223       WipeOrCleanupDisks(self, self.instance,
3224                          disks=[(idx, disk, 0)],
3225                          cleanup=new_disks)
3227     return (disk, [
3228       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3229       ])
3231   @staticmethod
3232   def _ModifyDisk(idx, disk, params, _):
3233     """Modifies a disk.
3235     """
3236     changes = []
3237     mode = params.get(constants.IDISK_MODE, None)
3238     if mode:
3239       disk.mode = mode
3240       changes.append(("disk.mode/%d" % idx, disk.mode))
3242     name = params.get(constants.IDISK_NAME, None)
3243     disk.name = name
3244     changes.append(("disk.name/%d" % idx, disk.name))
3246     return changes
3248   def _RemoveDisk(self, idx, root, _):
3249     """Removes a disk.
3251     """
3252     (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3253 for node_uuid, disk in anno_disk.ComputeNodeTree(
3254 self.instance.primary_node):
3255 self.cfg.SetDiskID(disk, node_uuid)
3256       msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
3257       if msg:
3258         self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3259 " continuing anyway", idx,
3260 self.cfg.GetNodeName(node_uuid), msg)
3262 # if this is a DRBD disk, return its port to the pool
3263 if root.dev_type in constants.LDS_DRBD:
3264 self.cfg.AddTcpUdpPort(root.logical_id[2])
3266 def _CreateNewNic(self, idx, params, private):
3267 """Creates data structure for a new network interface.
3270 mac = params[constants.INIC_MAC]
3271 ip = params.get(constants.INIC_IP, None)
3272 net = params.get(constants.INIC_NETWORK, None)
3273 name = params.get(constants.INIC_NAME, None)
3274 net_uuid = self.cfg.LookupNetwork(net)
3275 #TODO: not private.filled?? can a nic have no nicparams??
3276 nicparams = private.filled
3277 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3278 nicparams=nicparams)
3279     nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3281     return (nobj, [
3282       ("nic.%d" % idx,
3283        "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3284        (mac, ip, private.filled[constants.NIC_MODE],
3285         private.filled[constants.NIC_LINK],
3286         net)),
3287       ])
3289 def _ApplyNicMods(self, idx, nic, params, private):
3290 """Modifies a network interface.
3295 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3297 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3298 setattr(nic, key, params[key])
3300 new_net = params.get(constants.INIC_NETWORK, nic.network)
3301 new_net_uuid = self.cfg.LookupNetwork(new_net)
3302 if new_net_uuid != nic.network:
3303 changes.append(("nic.network/%d" % idx, new_net))
3304       nic.network = new_net_uuid
3306     if private.filled:
3307       nic.nicparams = private.filled
3309       for (key, val) in nic.nicparams.items():
3310         changes.append(("nic.%s/%d" % (key, idx), val))
3312     return changes
3314 def Exec(self, feedback_fn):
3315 """Modifies an instance.
3317 All parameters take effect only at the next restart of the instance.
3320 # Process here the warnings from CheckPrereq, as we don't have a
3321 # feedback_fn there.
3322 # TODO: Replace with self.LogWarning
3323 for warn in self.warn:
3324 feedback_fn("WARNING: %s" % warn)
3326 assert ((self.op.disk_template is None) ^
3327 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3328 "Not owning any node resource locks"
3333 if self.op.pnode_uuid:
3334 self.instance.primary_node = self.op.pnode_uuid
3337 if self.op.runtime_mem:
3338       rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3339                                                      self.instance,
3340                                                      self.op.runtime_mem)
3341 rpcres.Raise("Cannot modify instance runtime memory")
3342 result.append(("runtime_memory", self.op.runtime_mem))
3344 # Apply disk changes
3345 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3346 self._CreateNewDisk, self._ModifyDisk,
3348 _UpdateIvNames(0, self.instance.disks)
3350     if self.op.disk_template:
3351       if __debug__:
3352         check_nodes = set(self.instance.all_nodes)
3353 if self.op.remote_node_uuid:
3354 check_nodes.add(self.op.remote_node_uuid)
3355 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3356 owned = self.owned_locks(level)
3357 assert not (check_nodes - owned), \
3358 ("Not owning the correct locks, owning %r, expected at least %r" %
3359 (owned, check_nodes))
3361       r_shut = ShutdownInstanceDisks(self, self.instance)
3362       if not r_shut:
3363         raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3364 " proceed with disk template conversion")
3365       mode = (self.instance.disk_template, self.op.disk_template)
3366       try:
3367         self._DISK_CONVERSIONS[mode](self, feedback_fn)
3368       except:
3369         self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3370         raise
3371       result.append(("disk_template", self.op.disk_template))
3373 assert self.instance.disk_template == self.op.disk_template, \
3374 ("Expected disk template '%s', found '%s'" %
3375 (self.op.disk_template, self.instance.disk_template))
3377 # Release node and resource locks if there are any (they might already have
3378 # been released during disk conversion)
3379 ReleaseLocks(self, locking.LEVEL_NODE)
3380 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3383 if self._new_nics is not None:
3384 self.instance.nics = self._new_nics
3385 result.extend(self._nic_chgdesc)
3388 if self.op.hvparams:
3389 self.instance.hvparams = self.hv_inst
3390 for key, val in self.op.hvparams.iteritems():
3391 result.append(("hv/%s" % key, val))
3394 if self.op.beparams:
3395 self.instance.beparams = self.be_inst
3396 for key, val in self.op.beparams.iteritems():
3397         result.append(("be/%s" % key, val))
3399     # OS change
3400     if self.op.os_name:
3401       self.instance.os = self.op.os_name
3404 if self.op.osparams:
3405 self.instance.osparams = self.os_inst
3406 for key, val in self.op.osparams.iteritems():
3407 result.append(("os/%s" % key, val))
3409     if self.op.offline is None:
3410       # Ignore
3411       pass
3412     elif self.op.offline:
3413 # Mark instance as offline
3414 self.cfg.MarkInstanceOffline(self.instance.uuid)
3415 result.append(("admin_state", constants.ADMINST_OFFLINE))
3417 # Mark instance as online, but stopped
3418 self.cfg.MarkInstanceDown(self.instance.uuid)
3419 result.append(("admin_state", constants.ADMINST_DOWN))
3421 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3423 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3424 self.owned_locks(locking.LEVEL_NODE)), \
3425 "All node locks should have been released by now"
3429 _DISK_CONVERSIONS = {
3430 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3431     (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3432     }
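# Illustrative note (not part of the original source): _DISK_CONVERSIONS is a
# dispatch table keyed on (old_template, new_template); Exec looks up e.g.
#
#   self._DISK_CONVERSIONS[(constants.DT_PLAIN, constants.DT_DRBD8)]
#
# and calls the matching conversion method, which is why only the pairs
# listed here pass _PreCheckDiskTemplate.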
3435 class LUInstanceChangeGroup(LogicalUnit):
3436   HPATH = "instance-change-group"
3437   HTYPE = constants.HTYPE_INSTANCE
3438   REQ_BGL = False
3440 def ExpandNames(self):
3441 self.share_locks = ShareAll()
3443 self.needed_locks = {
3444 locking.LEVEL_NODEGROUP: [],
3445 locking.LEVEL_NODE: [],
3446     locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3447     }
3449 self._ExpandAndLockInstance()
3451 if self.op.target_groups:
3452 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3453                                   self.op.target_groups)
3454     else:
3455       self.req_target_uuids = None
3457 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
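
  # The node group locks were only acquired optimistically in DeclareLocks;
  # CheckPrereq re-reads the configuration and asserts that the instance's
  # nodes and groups did not change while the locks were being acquired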
  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)
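
    # The iallocator result is turned into a list of jobs that are returned
    # to the caller for execution instead of being run inside this LU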
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)