# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callback function
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
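
# Illustrative behaviour (not executed): a given name "instance1" that
# resolves to "instance1.example.com" is accepted, since the given name is
# a prefix component of the resolved one; a resolution to
# "other.example.com" would raise OpPrereqError instead.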


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
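
# Illustrative example (not executed): with op.beparams == {"vcpus": "auto"},
# the "auto" value is first replaced by the cluster default, then the dict is
# type-checked and merged over the cluster defaults via SimpleFillBE().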


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)
      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)
      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
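
# A single entry of op.nics is a plain dict; an illustrative spec could be
# {"mode": "bridged", "link": "br0", "ip": "pool", "network": "net1"}
# (names chosen for illustration), where the actual IP allocation is deferred
# to CheckPrereq once the primary node is known.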


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether an instance spec meets the specs of an ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
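
# OS variants are encoded in the name as "<os>+<variant>", e.g. an
# illustrative "debootstrap+default"; objects.OS.GetVariant() extracts the
# part after the "+".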


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)),
                                 errors.ECODE_STATE)

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
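
  # Note: the node resource locks mirror the node locks above, so that later
  # phases can release the plain node locks while still holding the
  # resource-level ones during disk creation.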

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]
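
  # With opportunistic locking the allocation may fail simply because the
  # busiest nodes were skipped, hence ECODE_TEMP_NORES signals callers that
  # retrying the job later may succeed.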

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]
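
  # The net effect: with identify_defaults, parameters that merely repeat the
  # current cluster defaults are dropped from the instance-level dicts, so
  # later changes to the cluster defaults propagate to the instance.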

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" %
                                   self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
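
    # At this point adopted volumes/devices have had their sizes read back
    # from the node, so the instance policy check below runs against the
    # real sizes rather than the user-supplied ones.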

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we have a chicken-and-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
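
  # Note: both the dry-run result (CheckPrereq) and the Exec return value are
  # the list of nodes used by the instance, primary node first.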


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
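
  # RemoveInstance() (from instance_utils) takes care of removing the disks
  # and deleting the instance from the configuration, honouring
  # ignore_failures for the disk removal step.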


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)
1716 def Exec(self, feedback_fn):
1717 """Move an instance.
1719 The move is done by shutting it down on its present node, copying
1720 the data over (slow) and starting it on the new node.
1723 instance = self.instance
1725 source_node = instance.primary_node
1726 target_node = self.target_node
1728 self.LogInfo("Shutting down instance %s on source node %s",
1729 instance.name, source_node)
1731 assert (self.owned_locks(locking.LEVEL_NODE) ==
1732 self.owned_locks(locking.LEVEL_NODE_RES))
1734 result = self.rpc.call_instance_shutdown(source_node, instance,
1735 self.op.shutdown_timeout,
1737 msg = result.fail_msg
1739 if self.op.ignore_consistency:
1740 self.LogWarning("Could not shutdown instance %s on node %s."
1741 " Proceeding anyway. Please make sure node"
1742 " %s is down. Error details: %s",
1743 instance.name, source_node, source_node, msg)
1745 raise errors.OpExecError("Could not shutdown instance %s on"
1747 (instance.name, source_node, msg))
1749 # create the target disks
1751 CreateDisks(self, instance, target_node=target_node)
1752 except errors.OpExecError:
1753 self.LogWarning("Device creation failed")
1754 self.cfg.ReleaseDRBDMinors(instance.name)
1757 cluster_name = self.cfg.GetClusterInfo().cluster_name
1760 # activate, get path, copy the data over
1761 for idx, disk in enumerate(instance.disks):
1762 self.LogInfo("Copying data for disk %d", idx)
1763 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1764 instance.name, True, idx)
1766 self.LogWarning("Can't assemble newly created disk %d: %s",
1767 idx, result.fail_msg)
1768 errs.append(result.fail_msg)
1770 dev_path, _ = result.payload
1771 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1772 target_node, dev_path,
1775 self.LogWarning("Can't copy data over for disk %d: %s",
1776 idx, result.fail_msg)
1777 errs.append(result.fail_msg)
1781 self.LogWarning("Some disks failed to copy, aborting")
1783 RemoveDisks(self, instance, target_node=target_node)
1785 self.cfg.ReleaseDRBDMinors(instance.name)
1786 raise errors.OpExecError("Errors during disk copy: %s" %
1789 instance.primary_node = target_node
1790 self.cfg.Update(instance, feedback_fn)
1792 self.LogInfo("Removing the disks on the original node")
1793 RemoveDisks(self, instance, target_node=source_node)
1795 # Only start the instance if it's marked as up
1796 if instance.admin_state == constants.ADMINST_UP:
1797 self.LogInfo("Starting instance %s on node %s",
1798 instance.name, target_node)
1800 disks_ok, _ = AssembleInstanceDisks(self, instance,
1801 ignore_secondaries=True)
1803 ShutdownInstanceDisks(self, instance)
1804 raise errors.OpExecError("Can't activate the instance's disks")
1806 result = self.rpc.call_instance_start(target_node,
1807 (instance, None, None), False,
1809 msg = result.fail_msg
1811 ShutdownInstanceDisks(self, instance)
1812 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1813 (instance.name, target_node, msg))
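

# Illustrative sketch (not part of the original module): LUInstanceMove.Exec
# above treats "assemble the new disk on the target" and "export the data
# from the source" as one unit per disk and aborts the whole copy on the
# first failure. A minimal stand-alone model of that control flow; "copy_fn"
# is a hypothetical callback returning an error message or None:
def _ExampleCopyAllOrAbort(disks, copy_fn):
  """Returns a list of error messages; an empty list means success."""
  errs = []
  for idx, disk in enumerate(disks):
    err = copy_fn(idx, disk)
    if err:
      errs.append(err)
      break  # abort on the first failure, as LUInstanceMove.Exec does
  return errs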


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, nodes) in allocatable:
        op = op2inst.pop(name)

        if len(nodes) > 1:
          (op.pnode, op.snode) = nodes
        else:
          (op.pnode,) = nodes

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())
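

# Illustrative sketch (not part of the original module): shape of the value
# built by _ConstructPartialResult and merged into the job result above.
# Instance names are made up; the keys are the real opcode constants:
#
#   {
#     opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", "inst2"],
#     opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst3"],
#   }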


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification; if C{None}, the field is set to C{None}

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
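

# Illustrative sketch (not part of the original module): shows the 4-tuple
# shape produced by _PrepareContainerMods when a private_fn is given; the
# modification itself is made up:
def _ExamplePrepareContainerMods():
  mods = [(constants.DDM_ADD, -1, {constants.NIC_LINK: "br0"})]
  prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
  (op, idx, params, private) = prepared[0]
  assert isinstance(private, _InstNicModPrivate)
  return (op, idx, params, private)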


def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to query CPU information for
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are"
                                 " required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)
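

# Illustrative usage sketch (not part of the original module): a CPU mask of
# "0-2,5" pins vCPUs to physical CPUs 0, 1, 2 and 5, so every node must
# expose at least six physical CPUs (the highest requested CPU plus one);
# the node names and hypervisor name below are made up:
#
#   _CheckNodesPhysicalCPUs(lu, ["node1", "node2"], 5 + 1, "kvm")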


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
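

# Illustrative sketch (not part of the original module): integer identifiers
# index the container (-1 meaning the last item), anything else is matched
# against item names and UUIDs; the NIC values are made up:
def _ExampleGetItemFromContainer():
  nic0 = objects.NIC(mac="aa:00:00:00:00:01", name="eth0", uuid="uuid-0")
  nic1 = objects.NIC(mac="aa:00:00:00:00:02", name="eth1", uuid="uuid-1")
  container = [nic0, nic1]
  assert GetItemFromContainer("-1", "nic", container) == (1, nic1)
  assert GetItemFromContainer("0", "nic", container) == (0, nic0)
  assert GetItemFromContainer("eth1", "nic", container) == (1, nic1)
  return container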


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integers or -1 are accepted"
                                   " as identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
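

# Illustrative sketch (not part of the original module): _ApplyContainerMods
# drives the three callbacks. Here the container holds plain dicts and the
# callbacks are trivial; in LUInstanceSetParams they create, modify and
# remove real disk and NIC objects:
def _ExampleApplyContainerMods():
  container = [{"name": "first"}]
  chgdesc = []

  def create_fn(addidx, params, private):
    # must return (new item, list of changes)
    return (params, [("item/%s" % addidx, "add")])

  def modify_fn(absidx, item, params, private):
    item.update(params)
    return [("item/%s" % absidx, "modify")]

  mods = _PrepareContainerMods([
    (constants.DDM_ADD, -1, {"name": "second"}),
    (constants.DDM_MODIFY, "0", {"name": "renamed"}),
    ], None)
  _ApplyContainerMods("item", container, chgdesc, mods,
                      create_fn, modify_fn, None)
  assert [i["name"] for i in container] == ["renamed", "second"]
  return chgdesc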


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type base_index: int
  @param base_index: the index of the first disk to be renumbered
  @type disks: list of L{objects.Disk}
  @param disks: the disks to be updated

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
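

# Illustrative sketch (not part of the original module): after removing the
# first disk of a three-disk instance, renumbering from base_index 0 renames
# the surviving "disk/1" and "disk/2" to "disk/0" and "disk/1":
def _ExampleUpdateIvNames():
  disks = [objects.Disk(iv_name="disk/1"), objects.Disk(iv_name="disk/2")]
  _UpdateIvNames(0, disks)
  return [d.iv_name for d in disks]  # ["disk/0", "disk/1"]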


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " must be passed too",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
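
  # Illustrative sketch (not part of the original module): parameter dicts
  # accepted by the two verifiers above; all values are made up:
  #
  #   disk add:    {constants.IDISK_SIZE: 1024}   (mode defaults to "rw")
  #   disk modify: {constants.IDISK_MODE: constants.DISK_RDONLY}
  #   NIC add:     {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}
  #                (the MAC defaults to constants.VALUE_AUTO and the IP is
  #                later drawn from the address pool of "net1")
  #   remove:      {}   (passing any settings with a removal is rejected)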

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node groups to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {},
                      self._VerifyDiskModification)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      self._VerifyDiskModification)

    # Prepare disk/NIC modifications
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # Check the validity of the `provider' parameter
    if instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)
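        # Worked example (illustration only): the multi-mask "1-3:4,6" parses
        # to [[1, 2, 3], [4, 6]], i.e. two entries, so BE_VCPUS must be 2;
        # the largest requested CPU is 6, so every node needs at least seven
        # physical CPUs for the check above to pass.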

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0

          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   None, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if res_max or res_min:
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)
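
  # Illustrative sketch (not part of the original module): in the DRBD8 disk
  # tree each top-level disk has two LV children, data and metadata. The
  # conversion above keeps children[0] as the new plain top-level disk and
  # removes the metadata volume:
  #
  #   drbd8 disk
  #    +- children[0]: LV data volume -> becomes the plain top-level disk
  #    +- children[1]: LV metadata volume -> removed on the primary node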

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
    else:
      self.LogInfo("Hotplug done.")

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    if self.op.hotplug:
      # _, device_info = AssembleInstanceDisks(self, self.instance,
      #                                        [disk], check=False)
      self.cfg.SetDiskID(disk, self.instance.primary_node)
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        # _, _, dev_path = device_info[0]
        _, link_name = result.payload
        self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                            constants.HOTPLUG_TARGET_DISK,
                            disk, link_name, idx)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                          constants.HOTPLUG_TARGET_DISK,
                          root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      if self.op.keep_disks and disk.dev_type == constants.DT_EXT:
        continue
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                          constants.HOTPLUG_TARGET_NIC,
                          nobj, None, idx)

    desc = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
        private.filled[constants.NIC_LINK], net)),
      ]

    return (nobj, desc)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                          constants.HOTPLUG_TARGET_NIC,
                          nic, None, idx)

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                          constants.HOTPLUG_TARGET_NIC,
                          nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)