# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Logical units dealing with instances."""
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
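
# Illustrative example for _CheckHostnameSane (hostnames are made up): if
# the resolver expands "inst1" to "inst1.example.com", the
# MatchNameComponent() prefix check accepts it, while a resolution to
# something like "gateway.example.com" would raise OpPrereqError.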


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
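
# For illustration only (hypothetical values): with cluster defaults of
# {"maxmem": 512, "minmem": 512, "vcpus": 1, ...} and op.beparams of
# {"vcpus": constants.VALUE_AUTO}, the loop above rewrites "auto" to 1,
# UpgradeBeParams() migrates any legacy "memory" key to maxmem/minmem,
# and SimpleFillBE() merges the result over the cluster defaults.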


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
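
# Summary of the INIC_IP handling above: None or "none" leaves the NIC
# without an address, "auto" falls back to default_ip (the address the
# name check resolved, so name_check is required), "pool" defers the
# actual allocation to the network's address pool once the primary node
# is known (see CheckPrereq), and any other value must be a literal,
# valid IP address.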


def _CheckForConflictingIp(lu, ip, node):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
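
# The instance_spec dict above is keyed by the ISPEC_* constants, e.g.
# (illustrative values only):
#   {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 1,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}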


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
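
# OS variants are encoded in the OS name as "<os>+<variant>", e.g.
# "debootstrap+default"; objects.OS.GetVariant() extracts the part after
# the "+", which is what gets validated above.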


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)),
                                 errors.ECODE_STATE)

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
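
    # Note on the lock sets built above: with an iallocator (or an import
    # without an explicit source node) the target nodes are unknown here,
    # so all node locks are acquired and the superfluous ones are released
    # again in CheckPrereq; LEVEL_NODE_RES always mirrors LEVEL_NODE so
    # that later disk operations can run under resource locks only.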
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl
  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)

    return export_info
  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]
  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
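
      # Illustrative result, assuming the usual cluster default of
      # /srv/ganeti/file-storage as the storage directory:
      #   /srv/ganeti/file-storage/<op.file_storage_dir>/<instance_name>
      # (the middle component is only present when given in the opcode).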
  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" %
                                   self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
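
    # Note: both adoption branches above overwrite the user-supplied disk
    # size with the size reported by the node; the reported values (in
    # mebibytes) may arrive as floats or numeric strings, hence the
    # defensive int(float(...)) conversions.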

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
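
    # Both blockdev_setinfo and the OS rename script below need access to
    # the instance's disks, which is why they were assembled above; they
    # are shut down again in the finally clause at the end.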
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)
1715 def Exec(self, feedback_fn):
1716 """Move an instance.
1718 The move is done by shutting it down on its present node, copying
1719 the data over (slow) and starting it on the new node.
1722 instance = self.instance
1724 source_node = instance.primary_node
1725 target_node = self.target_node
1727 self.LogInfo("Shutting down instance %s on source node %s",
1728 instance.name, source_node)
1730 assert (self.owned_locks(locking.LEVEL_NODE) ==
1731 self.owned_locks(locking.LEVEL_NODE_RES))
1733 result = self.rpc.call_instance_shutdown(source_node, instance,
1734 self.op.shutdown_timeout,
1736 msg = result.fail_msg
1738 if self.op.ignore_consistency:
1739 self.LogWarning("Could not shutdown instance %s on node %s."
1740 " Proceeding anyway. Please make sure node"
1741 " %s is down. Error details: %s",
1742 instance.name, source_node, source_node, msg)
1744 raise errors.OpExecError("Could not shutdown instance %s on"
1746 (instance.name, source_node, msg))
1748 # create the target disks
1750 CreateDisks(self, instance, target_node=target_node)
1751 except errors.OpExecError:
1752 self.LogWarning("Device creation failed")
1753 self.cfg.ReleaseDRBDMinors(instance.name)
1756 cluster_name = self.cfg.GetClusterInfo().cluster_name
1758 errs = []
1759 # activate, get path, copy the data over
1760 for idx, disk in enumerate(instance.disks):
1761 self.LogInfo("Copying data for disk %d", idx)
1762 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1763 instance.name, True, idx)
1764 if result.fail_msg:
1765 self.LogWarning("Can't assemble newly created disk %d: %s",
1766 idx, result.fail_msg)
1767 errs.append(result.fail_msg)
1768 break
1769 dev_path, _ = result.payload
1770 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1771 target_node, dev_path,
1772 cluster_name)
1773 if result.fail_msg:
1774 self.LogWarning("Can't copy data over for disk %d: %s",
1775 idx, result.fail_msg)
1776 errs.append(result.fail_msg)
1777 break
1779 if errs:
1780 self.LogWarning("Some disks failed to copy, aborting")
1781 try:
1782 RemoveDisks(self, instance, target_node=target_node)
1783 finally:
1784 self.cfg.ReleaseDRBDMinors(instance.name)
1785 raise errors.OpExecError("Errors during disk copy: %s" %
1786 (",".join(errs),))
1788 instance.primary_node = target_node
1789 self.cfg.Update(instance, feedback_fn)
1791 self.LogInfo("Removing the disks on the original node")
1792 RemoveDisks(self, instance, target_node=source_node)
1794 # Only start the instance if it's marked as up
1795 if instance.admin_state == constants.ADMINST_UP:
1796 self.LogInfo("Starting instance %s on node %s",
1797 instance.name, target_node)
1799 disks_ok, _ = AssembleInstanceDisks(self, instance,
1800 ignore_secondaries=True)
1801 if not disks_ok:
1802 ShutdownInstanceDisks(self, instance)
1803 raise errors.OpExecError("Can't activate the instance's disks")
1805 result = self.rpc.call_instance_start(target_node,
1806 (instance, None, None), False,
1807 self.op.reason)
1808 msg = result.fail_msg
1809 if msg:
1810 ShutdownInstanceDisks(self, instance)
1811 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1812 (instance.name, target_node, msg))
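# Illustrative client-side sketch (values are assumptions, not part of this
# module): the copy-based move implemented above is normally requested by
# submitting an OpInstanceMove opcode, e.g.:
#
#   op = opcodes.OpInstanceMove(instance_name="inst1.example.com",
#                               target_node="node2.example.com",
#                               shutdown_timeout=120)
#
# shutdown_timeout and ignore_consistency are optional and are consumed by
# Exec above.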
1815 class LUInstanceMultiAlloc(NoHooksLU):
1816 """Allocates multiple instances at the same time.
1821 def CheckArguments(self):
1825 nodes = []
1826 for inst in self.op.instances:
1827 if inst.iallocator is not None:
1828 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1829 " instance objects", errors.ECODE_INVAL)
1830 nodes.append(bool(inst.pnode))
1831 if inst.disk_template in constants.DTS_INT_MIRROR:
1832 nodes.append(bool(inst.snode))
1834 has_nodes = compat.any(nodes)
1835 if compat.all(nodes) ^ has_nodes:
1836 raise errors.OpPrereqError("There are instance objects providing"
1837 " pnode/snode while others do not",
1838 errors.ECODE_INVAL)
1840 if not has_nodes and self.op.iallocator is None:
1841 default_iallocator = self.cfg.GetDefaultIAllocator()
1842 if default_iallocator:
1843 self.op.iallocator = default_iallocator
1844 else:
1845 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1846 " given and no cluster-wide default"
1847 " iallocator found; please specify either"
1848 " an iallocator or nodes on the instances"
1849 " or set a cluster-wide default iallocator",
1850 errors.ECODE_INVAL)
1852 _CheckOpportunisticLocking(self.op)
1854 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1855 if dups:
1856 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1857 utils.CommaJoin(dups), errors.ECODE_INVAL)
1859 def ExpandNames(self):
1860 """Calculate the locks.
1863 self.share_locks = ShareAll()
1864 self.needed_locks = {
1865 # iallocator will select nodes and even if no iallocator is used,
1866 # collisions with LUInstanceCreate should be avoided
1867 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1868 }
1870 if self.op.iallocator:
1871 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1872 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1874 if self.op.opportunistic_locking:
1875 self.opportunistic_locks[locking.LEVEL_NODE] = True
1876 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1877 else:
1878 nodeslist = []
1879 for inst in self.op.instances:
1880 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1881 nodeslist.append(inst.pnode)
1882 if inst.snode is not None:
1883 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1884 nodeslist.append(inst.snode)
1886 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1887 # Lock resources of instance's primary and secondary nodes (copy to
1888 # prevent accidental modification)
1889 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1891 def CheckPrereq(self):
1892 """Check prerequisites.
1895 if self.op.iallocator:
1896 cluster = self.cfg.GetClusterInfo()
1897 default_vg = self.cfg.GetVGName()
1898 ec_id = self.proc.GetECId()
1900 if self.op.opportunistic_locking:
1901 # Only consider nodes for which a lock is held
1902 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1903 else:
1904 node_whitelist = None
1906 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1907 _ComputeNics(op, cluster, None,
1908 self.cfg, ec_id),
1909 _ComputeFullBeParams(op, cluster),
1910 node_whitelist)
1911 for op in self.op.instances]
1913 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1914 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1916 ial.Run(self.op.iallocator)
1918 if not ial.success:
1919 raise errors.OpPrereqError("Can't compute nodes using"
1920 " iallocator '%s': %s" %
1921 (self.op.iallocator, ial.info),
1922 errors.ECODE_NORES)
1924 self.ia_result = ial.result
1927 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1928 constants.JOB_IDS_KEY: [],
1929 })
1931 def _ConstructPartialResult(self):
1932 """Constructs the partial result.
1935 if self.op.iallocator:
1936 (allocatable, failed_insts) = self.ia_result
1937 allocatable_insts = map(compat.fst, allocatable)
1938 else:
1939 allocatable_insts = [op.instance_name for op in self.op.instances]
1940 failed_insts = []
1942 return {
1943 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1944 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1945 }
1947 def Exec(self, feedback_fn):
1948 """Executes the opcode.
1951 jobs = []
1952 if self.op.iallocator:
1953 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1954 (allocatable, failed) = self.ia_result
1956 for (name, nodes) in allocatable:
1957 op = op2inst.pop(name)
1959 if len(nodes) > 1:
1960 (op.pnode, op.snode) = nodes
1961 else:
1962 (op.pnode,) = nodes
1964 jobs.append([op])
1966 missing = set(op2inst.keys()) - set(failed)
1967 assert not missing, \
1968 "Iallocator returned an incomplete result: %s" % \
1969 utils.CommaJoin(missing)
1970 else:
1971 jobs.extend([op] for op in self.op.instances)
1973 return ResultWithJobs(jobs, **self._ConstructPartialResult())
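# Sketch of the combined result shape returned above (keys shown
# symbolically, value lists are illustrative only):
#
#   {
#     constants.JOB_IDS_KEY: [...],  # added by ResultWithJobs
#     opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", ...],
#     opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst2", ...],
#   }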
1976 class _InstNicModPrivate:
1977 """Data structure for network interface modifications.
1979 Used by L{LUInstanceSetParams}.
1987 def _PrepareContainerMods(mods, private_fn):
1988 """Prepares a list of container modifications by adding a private data field.
1990 @type mods: list of tuples; (operation, index, parameters)
1991 @param mods: List of modifications
1992 @type private_fn: callable or None
1993 @param private_fn: Callable for constructing a private data field for a
1994 modification
1996 """
1998 if private_fn is None:
1999 fn = lambda: None
2000 else:
2001 fn = private_fn
2003 return [(op, idx, params, fn()) for (op, idx, params) in mods]
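# Example (illustrative values): with private_fn=None every modification is
# simply extended with None as its private data field:
#
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
#   _PrepareContainerMods(mods, None)
#   # -> [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)]
#
# Passing a class such as _InstNicModPrivate instead attaches a fresh
# private object to each modification.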
2006 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2007 """Checks if nodes have enough physical CPUs
2009 This function checks if all given nodes have the needed number of
2010 physical CPUs. In case any node has fewer CPUs than requested, or the
2011 information cannot be retrieved from the node, this function raises an
2012 OpPrereqError exception.
2014 @type lu: C{LogicalUnit}
2015 @param lu: a logical unit from which we get configuration data
2016 @type nodenames: C{list}
2017 @param nodenames: the list of node names to check
2018 @type requested: C{int}
2019 @param requested: the minimum acceptable number of physical CPUs
2020 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2021 or we cannot check the node
2024 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2025 for node in nodenames:
2026 info = nodeinfo[node]
2027 info.Raise("Cannot get current information from node %s" % node,
2028 prereq=True, ecode=errors.ECODE_ENVIRON)
2029 (_, _, (hv_info, )) = info.payload
2030 num_cpus = hv_info.get("cpu_total", None)
2031 if not isinstance(num_cpus, int):
2032 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2033 " on node %s, result was '%s'" %
2034 (node, num_cpus), errors.ECODE_ENVIRON)
2035 if requested > num_cpus:
2036 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2037 "required" % (node, num_cpus, requested),
2038 errors.ECODE_ENVIRON)
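# Example (sketch): LUInstanceSetParams.CheckPrereq below uses this helper
# when a new CPU mask is given, requiring one more physical CPU than the
# highest CPU number appearing in the mask:
#
#   _CheckNodesPhysicalCPUs(self, instance.all_nodes,
#                           max_requested_cpu + 1, instance.hypervisor)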
2041 def GetItemFromContainer(identifier, kind, container):
2042 """Return the item referred to by the identifier.
2044 @type identifier: string
2045 @param identifier: Item index or name or UUID
2047 @param kind: One-word item description
2048 @type container: list
2049 @param container: Container to get the item from
2053 try:
2054 idx = int(identifier)
2055 if idx == -1:
2056 # Append
2057 absidx = len(container) - 1
2058 elif idx < 0:
2059 raise IndexError("Not accepting negative indices other than -1")
2060 elif idx > len(container):
2061 raise IndexError("Got %s index %s, but there are only %s" %
2062 (kind, idx, len(container)))
2063 else:
2064 absidx = idx
2065 return (absidx, container[idx])
2066 except ValueError:
2067 pass
2069 for idx, item in enumerate(container):
2070 if item.uuid == identifier or item.name == identifier:
2071 return (idx, item)
2073 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2074 (kind, identifier), errors.ECODE_NOENT)
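# Example (sketch, assumed objects): with container = [nic0, nic1] and
# nic1.name == "eth1", all of the following calls return (1, nic1):
#
#   GetItemFromContainer("1", "NIC", container)     # by index
#   GetItemFromContainer("-1", "NIC", container)    # -1 selects the last item
#   GetItemFromContainer("eth1", "NIC", container)  # by name (or UUID)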
2077 def _ApplyContainerMods(kind, container, chgdesc, mods,
2078 create_fn, modify_fn, remove_fn):
2079 """Applies descriptions in C{mods} to C{container}.
2082 @param kind: One-word item description
2083 @type container: list
2084 @param container: Container to modify
2085 @type chgdesc: None or list
2086 @param chgdesc: List of applied changes
2088 @param mods: Modifications as returned by L{_PrepareContainerMods}
2089 @type create_fn: callable
2090 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2091 receives absolute item index, parameters and private data object as added
2092 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2094 @type modify_fn: callable
2095 @param modify_fn: Callback for modifying an existing item
2096 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2097 and private data object as added by L{_PrepareContainerMods}, returns
2099 @type remove_fn: callable
2100 @param remove_fn: Callback on removing item; receives absolute item index,
2101 item and private data object as added by L{_PrepareContainerMods}
2104 for (op, identifier, params, private) in mods:
2105 changes = None
2107 if op == constants.DDM_ADD:
2108 # Calculate where item will be added
2109 # When adding an item, identifier can only be an index
2110 try:
2111 idx = int(identifier)
2112 except ValueError:
2113 raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
2114 " as identifier for %s" % constants.DDM_ADD,
2115 errors.ECODE_PARAM)
2116 if idx == -1:
2117 addidx = len(container)
2118 else:
2119 if idx < 0:
2120 raise IndexError("Not accepting negative indices other than -1")
2121 elif idx > len(container):
2122 raise IndexError("Got %s index %s, but there are only %s" %
2123 (kind, idx, len(container)))
2124 addidx = idx
2126 if create_fn is None:
2127 item = params
2128 else:
2129 (item, changes) = create_fn(addidx, params, private)
2131 if idx == -1:
2132 container.append(item)
2133 else:
2135 assert idx <= len(container)
2136 # list.insert does so before the specified index
2137 container.insert(idx, item)
2138 else:
2139 # Retrieve existing item
2140 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2142 if op == constants.DDM_REMOVE:
2143 assert not params
2145 if remove_fn is not None:
2146 remove_fn(absidx, item, private)
2148 changes = [("%s/%s" % (kind, absidx), "remove")]
2150 assert container[absidx] == item
2151 del container[absidx]
2152 elif op == constants.DDM_MODIFY:
2153 if modify_fn is not None:
2154 changes = modify_fn(absidx, item, params, private)
2155 else:
2156 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2158 assert _TApplyContModsCbChanges(changes)
2160 if not (chgdesc is None or changes is None):
2161 chgdesc.extend(changes)
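# Example (sketch, assuming a create_fn that returns (item, changes) as
# documented above):
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, params)], None)
#   _ApplyContainerMods("disk", container, chgdesc, mods,
#                       create_fn, None, None)
#
# Afterwards the newly created item has been appended to 'container' and
# 'chgdesc' holds the (path, description) tuples returned by create_fn.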
2164 def _UpdateIvNames(base_index, disks):
2165 """Updates the C{iv_name} attribute of disks.
2167 @type disks: list of L{objects.Disk}
2170 for (idx, disk) in enumerate(disks):
2171 disk.iv_name = "disk/%s" % (base_index + idx, )
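# Example: after _UpdateIvNames(0, instance.disks) a two-disk instance
# carries the iv_names "disk/0" and "disk/1"; a non-zero base_index is used
# when only the tail of the disk list needs renumbering.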
2174 class LUInstanceSetParams(LogicalUnit):
2175 """Modifies an instance's parameters.
2178 HPATH = "instance-modify"
2179 HTYPE = constants.HTYPE_INSTANCE
2183 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2184 assert ht.TList(mods)
2185 assert not mods or len(mods[0]) in (2, 3)
2187 if mods and len(mods[0]) == 2:
2188 result = []
2189 addremove = 0
2191 for op, params in mods:
2192 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2193 result.append((op, -1, params))
2194 addremove += 1
2196 if addremove > 1:
2197 raise errors.OpPrereqError("Only one %s add or remove operation is"
2198 " supported at a time" % kind,
2199 errors.ECODE_INVAL)
2200 else:
2201 result.append((constants.DDM_MODIFY, op, params))
2203 assert verify_fn(result)
2204 else:
2205 result = mods
2207 return result
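# Example (illustrative): the legacy two-element format
#   [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})]
# is upgraded to the indexed three-element format
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
# while a legacy modify entry given as (index, params) becomes
#   (constants.DDM_MODIFY, index, params).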
2210 def _CheckMods(kind, mods, key_types, item_fn):
2211 """Ensures requested disk/NIC modifications are valid.
2214 for (op, _, params) in mods:
2215 assert ht.TDict(params)
2217 # If 'key_types' is an empty dict, we assume we have an
2218 # 'ext' template and thus do not ForceDictType
2219 if key_types:
2220 utils.ForceDictType(params, key_types)
2222 if op == constants.DDM_REMOVE:
2223 if params:
2224 raise errors.OpPrereqError("No settings should be passed when"
2225 " removing a %s" % kind,
2226 errors.ECODE_INVAL)
2227 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2228 item_fn(op, params)
2229 else:
2230 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2233 def _VerifyDiskModification(op, params):
2234 """Verifies a disk modification.
2237 if op == constants.DDM_ADD:
2238 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2239 if mode not in constants.DISK_ACCESS_SET:
2240 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2241 errors.ECODE_INVAL)
2243 size = params.get(constants.IDISK_SIZE, None)
2244 if size is None:
2245 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2246 constants.IDISK_SIZE, errors.ECODE_INVAL)
2248 try:
2249 size = int(size)
2250 except (TypeError, ValueError), err:
2251 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2252 errors.ECODE_INVAL)
2254 params[constants.IDISK_SIZE] = size
2255 name = params.get(constants.IDISK_NAME, None)
2256 if name is not None and name.lower() == constants.VALUE_NONE:
2257 params[constants.IDISK_NAME] = None
2259 elif op == constants.DDM_MODIFY:
2260 if constants.IDISK_SIZE in params:
2261 raise errors.OpPrereqError("Disk size change not possible, use"
2262 " grow-disk", errors.ECODE_INVAL)
2263 if len(params) > 2:
2264 raise errors.OpPrereqError("Disk modification doesn't support"
2265 " additional arbitrary parameters",
2266 errors.ECODE_INVAL)
2267 name = params.get(constants.IDISK_NAME, None)
2268 if name is not None and name.lower() == constants.VALUE_NONE:
2269 params[constants.IDISK_NAME] = None
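# Example (sketch): a minimal valid disk addition only needs a size, as the
# access mode defaults to constants.DISK_RDWR:
#
#   (constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})
#
# A DDM_MODIFY entry carrying IDISK_SIZE is rejected here, since resizing
# is handled by the separate grow-disk operation.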
2272 def _VerifyNicModification(op, params):
2273 """Verifies a network interface modification.
2276 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2277 ip = params.get(constants.INIC_IP, None)
2278 name = params.get(constants.INIC_NAME, None)
2279 req_net = params.get(constants.INIC_NETWORK, None)
2280 link = params.get(constants.NIC_LINK, None)
2281 mode = params.get(constants.NIC_MODE, None)
2282 if name is not None and name.lower() == constants.VALUE_NONE:
2283 params[constants.INIC_NAME] = None
2284 if req_net is not None:
2285 if req_net.lower() == constants.VALUE_NONE:
2286 params[constants.INIC_NETWORK] = None
2287 req_net = None
2288 elif link is not None or mode is not None:
2289 raise errors.OpPrereqError("If a network is given, neither mode nor"
2290 " link may be set at the same time",
2291 errors.ECODE_INVAL)
2293 if op == constants.DDM_ADD:
2294 macaddr = params.get(constants.INIC_MAC, None)
2295 if macaddr is None:
2296 params[constants.INIC_MAC] = constants.VALUE_AUTO
2298 if ip is not None:
2299 if ip.lower() == constants.VALUE_NONE:
2300 params[constants.INIC_IP] = None
2301 else:
2302 if ip.lower() == constants.NIC_IP_POOL:
2303 if op == constants.DDM_ADD and req_net is None:
2304 raise errors.OpPrereqError("If ip=pool, parameter network"
2305 " is mandatory",
2306 errors.ECODE_INVAL)
2307 else:
2308 if not netutils.IPAddress.IsValid(ip):
2309 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2310 errors.ECODE_INVAL)
2312 if constants.INIC_MAC in params:
2313 macaddr = params[constants.INIC_MAC]
2314 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2315 macaddr = utils.NormalizeAndValidateMac(macaddr)
2317 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2318 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2319 " modifying an existing NIC",
2320 errors.ECODE_INVAL)
2322 def CheckArguments(self):
2323 if not (self.op.nics or self.op.disks or self.op.disk_template or
2324 self.op.hvparams or self.op.beparams or self.op.os_name or
2325 self.op.osparams or self.op.offline is not None or
2326 self.op.runtime_mem or self.op.pnode):
2327 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2329 if self.op.hvparams:
2330 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2331 "hypervisor", "instance", "cluster")
2333 self.op.disks = self._UpgradeDiskNicMods(
2334 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2335 self.op.nics = self._UpgradeDiskNicMods(
2336 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2338 if self.op.disks and self.op.disk_template is not None:
2339 raise errors.OpPrereqError("Disk template conversion and other disk"
2340 " changes not supported at the same time",
2341 errors.ECODE_INVAL)
2343 if (self.op.disk_template and
2344 self.op.disk_template in constants.DTS_INT_MIRROR and
2345 self.op.remote_node is None):
2346 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2347 " one requires specifying a secondary node",
2348 errors.ECODE_INVAL)
2350 # Check NIC modifications
2351 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2352 self._VerifyNicModification)
2354 if self.op.pnode:
2355 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2357 def ExpandNames(self):
2358 self._ExpandAndLockInstance()
2359 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2360 # Can't even acquire node locks in shared mode as upcoming changes in
2361 # Ganeti 2.6 will start to modify the node object on disk conversion
2362 self.needed_locks[locking.LEVEL_NODE] = []
2363 self.needed_locks[locking.LEVEL_NODE_RES] = []
2364 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2365 # Lock the node group (shared) to look up the ipolicy
2366 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2368 def DeclareLocks(self, level):
2369 if level == locking.LEVEL_NODEGROUP:
2370 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2371 # Acquire locks for the instance's nodegroups optimistically. Needs
2372 # to be verified in CheckPrereq
2373 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2374 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2375 elif level == locking.LEVEL_NODE:
2376 self._LockInstancesNodes()
2377 if self.op.disk_template and self.op.remote_node:
2378 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2379 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2380 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2382 self.needed_locks[locking.LEVEL_NODE_RES] = \
2383 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2385 def BuildHooksEnv(self):
2386 """Build hooks env.
2388 This runs on the master, primary and secondaries.
2390 """
2391 args = {}
2392 if constants.BE_MINMEM in self.be_new:
2393 args["minmem"] = self.be_new[constants.BE_MINMEM]
2394 if constants.BE_MAXMEM in self.be_new:
2395 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2396 if constants.BE_VCPUS in self.be_new:
2397 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2398 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2399 # information at all.
2401 if self._new_nics is not None:
2402 nics = []
2404 for nic in self._new_nics:
2405 n = copy.deepcopy(nic)
2406 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2407 n.nicparams = nicparams
2408 nics.append(NICToTuple(self, n))
2409 args["nics"] = nics
2410 else:
2411 args["nics"] = NICListToTuple(self, self.instance.nics)
2412 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2413 if self.op.disk_template:
2414 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2415 if self.op.runtime_mem:
2416 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2418 return env
2420 def BuildHooksNodes(self):
2421 """Build hooks nodes.
2424 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2425 return (nl, nl)
2427 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2428 old_params, cluster, pnode):
2430 update_params_dict = dict([(key, params[key])
2431 for key in constants.NICS_PARAMETERS
2432 if key in params])
2434 req_link = update_params_dict.get(constants.NIC_LINK, None)
2435 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2437 new_net_uuid = None
2438 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2439 if new_net_uuid_or_name:
2440 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2441 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2442 params[constants.INIC_NETWORK] = new_net_uuid
2443 if old_net_uuid:
2444 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2446 if new_net_uuid:
2447 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2448 if not netparams:
2449 raise errors.OpPrereqError("No netparams found for the network"
2450 " %s, probably not connected" %
2451 new_net_obj.name, errors.ECODE_INVAL)
2452 new_params = dict(netparams)
2453 else:
2454 new_params = GetUpdatedParams(old_params, update_params_dict)
2456 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2458 new_filled_params = cluster.SimpleFillNIC(new_params)
2459 objects.NIC.CheckParameterSyntax(new_filled_params)
2461 new_mode = new_filled_params[constants.NIC_MODE]
2462 if new_mode == constants.NIC_MODE_BRIDGED:
2463 bridge = new_filled_params[constants.NIC_LINK]
2464 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2465 if msg:
2466 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2467 if self.op.force:
2468 self.warn.append(msg)
2469 else:
2470 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2472 elif new_mode == constants.NIC_MODE_ROUTED:
2473 ip = params.get(constants.INIC_IP, old_ip)
2474 if ip is None:
2475 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2476 " on a routed NIC", errors.ECODE_INVAL)
2478 elif new_mode == constants.NIC_MODE_OVS:
2479 # TODO: check OVS link
2480 self.LogInfo("OVS links are currently not checked for correctness")
2482 if constants.INIC_MAC in params:
2483 mac = params[constants.INIC_MAC]
2484 if mac is None:
2485 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2486 errors.ECODE_INVAL)
2487 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2488 # otherwise generate the MAC address
2489 params[constants.INIC_MAC] = \
2490 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2492 # or validate/reserve the current one
2493 try:
2494 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2495 except errors.ReservationError:
2496 raise errors.OpPrereqError("MAC address '%s' already in use"
2497 " in cluster" % mac,
2498 errors.ECODE_NOTUNIQUE)
2499 elif new_net_uuid != old_net_uuid:
2501 def get_net_prefix(net_uuid):
2502 mac_prefix = None
2503 if net_uuid:
2504 nobj = self.cfg.GetNetwork(net_uuid)
2505 mac_prefix = nobj.mac_prefix
2507 return mac_prefix
2509 new_prefix = get_net_prefix(new_net_uuid)
2510 old_prefix = get_net_prefix(old_net_uuid)
2511 if old_prefix != new_prefix:
2512 params[constants.INIC_MAC] = \
2513 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2515 # if there is a change in (ip, network) tuple
2516 new_ip = params.get(constants.INIC_IP, old_ip)
2517 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2518 if new_ip:
2519 # if IP is pool then require a network and generate one IP
2520 if new_ip.lower() == constants.NIC_IP_POOL:
2521 if new_net_uuid:
2522 try:
2523 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2524 except errors.ReservationError:
2525 raise errors.OpPrereqError("Unable to get a free IP"
2526 " from the address pool",
2527 errors.ECODE_STATE)
2528 self.LogInfo("Chose IP %s from network %s",
2529 new_ip, new_net_obj.name)
2531 params[constants.INIC_IP] = new_ip
2532 else:
2533 raise errors.OpPrereqError("ip=pool, but no network found",
2534 errors.ECODE_INVAL)
2535 # Reserve the new IP in the new network, if any
2536 elif new_net_uuid:
2537 try:
2538 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2539 self.LogInfo("Reserving IP %s in network %s",
2540 new_ip, new_net_obj.name)
2541 except errors.ReservationError:
2542 raise errors.OpPrereqError("IP %s not available in network %s" %
2543 (new_ip, new_net_obj.name),
2544 errors.ECODE_NOTUNIQUE)
2545 # new network is None so check if new IP is a conflicting IP
2546 elif self.op.conflicts_check:
2547 _CheckForConflictingIp(self, new_ip, pnode)
2549 # release old IP if old network is not None
2550 if old_ip and old_net_uuid:
2551 try:
2552 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2553 except errors.AddressPoolError:
2554 logging.warning("Cannot release IP %s: not contained in"
2555 " network %s", old_ip, old_net_obj.name)
2557 # there are no changes in (ip, network) tuple and old network is not None
2558 elif (old_net_uuid is not None and
2559 (req_link is not None or req_mode is not None)):
2560 raise errors.OpPrereqError("Not allowed to change link or mode of"
2561 " a NIC that is connected to a network",
2562 errors.ECODE_INVAL)
2564 private.params = new_params
2565 private.filled = new_filled_params
2567 def _PreCheckDiskTemplate(self, pnode_info):
2568 """CheckPrereq checks related to a new disk template."""
2569 # Arguments are passed to avoid configuration lookups
2570 instance = self.instance
2571 pnode = instance.primary_node
2572 cluster = self.cluster
2573 if instance.disk_template == self.op.disk_template:
2574 raise errors.OpPrereqError("Instance already has disk template %s" %
2575 instance.disk_template, errors.ECODE_INVAL)
2577 if (instance.disk_template,
2578 self.op.disk_template) not in self._DISK_CONVERSIONS:
2579 raise errors.OpPrereqError("Unsupported disk template conversion from"
2580 " %s to %s" % (instance.disk_template,
2581 self.op.disk_template),
2582 errors.ECODE_INVAL)
2583 CheckInstanceState(self, instance, INSTANCE_DOWN,
2584 msg="cannot change disk template")
2585 if self.op.disk_template in constants.DTS_INT_MIRROR:
2586 if self.op.remote_node == pnode:
2587 raise errors.OpPrereqError("Given new secondary node %s is the same"
2588 " as the primary node of the instance" %
2589 self.op.remote_node, errors.ECODE_STATE)
2590 CheckNodeOnline(self, self.op.remote_node)
2591 CheckNodeNotDrained(self, self.op.remote_node)
2592 # FIXME: here we assume that the old instance type is DT_PLAIN
2593 assert instance.disk_template == constants.DT_PLAIN
2594 disks = [{constants.IDISK_SIZE: d.size,
2595 constants.IDISK_VG: d.logical_id[0]}
2596 for d in instance.disks]
2597 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2598 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2600 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2601 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2602 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2603 snode_group)
2604 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2605 ignore=self.op.ignore_ipolicy)
2606 if pnode_info.group != snode_info.group:
2607 self.LogWarning("The primary and secondary nodes are in two"
2608 " different node groups; the disk parameters"
2609 " from the first disk's node group will be"
2610 " used")
2612 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2613 # Make sure none of the nodes require exclusive storage
2614 nodes = [pnode_info]
2615 if self.op.disk_template in constants.DTS_INT_MIRROR:
2617 nodes.append(snode_info)
2618 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2619 if compat.any(map(has_es, nodes)):
2620 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2621 " storage is enabled" % (instance.disk_template,
2622 self.op.disk_template))
2623 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2625 def CheckPrereq(self):
2626 """Check prerequisites.
2628 This only checks the instance list against the existing names.
2631 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2632 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2634 cluster = self.cluster = self.cfg.GetClusterInfo()
2635 assert self.instance is not None, \
2636 "Cannot retrieve locked instance %s" % self.op.instance_name
2638 pnode = instance.primary_node
2639 self.warn = []
2642 if (self.op.pnode is not None and self.op.pnode != pnode and
2643 not self.op.force):
2644 # verify that the instance is not up
2645 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2646 instance.hypervisor)
2647 if instance_info.fail_msg:
2648 self.warn.append("Can't get instance runtime information: %s" %
2649 instance_info.fail_msg)
2650 elif instance_info.payload:
2651 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2652 errors.ECODE_STATE)
2654 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2655 nodelist = list(instance.all_nodes)
2656 pnode_info = self.cfg.GetNodeInfo(pnode)
2657 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2659 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2660 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2661 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2663 # dictionary with instance information after the modification
2664 ispec = {}
2666 if self.op.hotplug:
2667 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2668 self.instance)
2669 result.Raise("Hotplug is not supported.")
2671 # Check disk modifications. This is done here and not in CheckArguments
2672 # (as with NICs), because we need to know the instance's disk template
2673 if instance.disk_template == constants.DT_EXT:
2674 self._CheckMods("disk", self.op.disks, {},
2675 self._VerifyDiskModification)
2676 else:
2677 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2678 self._VerifyDiskModification)
2680 # Prepare disk/NIC modifications
2681 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2682 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2684 # Check the validity of the `provider' parameter
2685 if instance.disk_template == constants.DT_EXT:
2686 for mod in self.diskmod:
2687 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2688 if mod[0] == constants.DDM_ADD:
2689 if ext_provider is None:
2690 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2691 " '%s' missing, during disk add" %
2692 (constants.DT_EXT,
2693 constants.IDISK_PROVIDER),
2694 errors.ECODE_NOENT)
2695 elif mod[0] == constants.DDM_MODIFY:
2696 if ext_provider:
2697 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2698 " modification" %
2699 constants.IDISK_PROVIDER,
2700 errors.ECODE_INVAL)
2701 else:
2702 for mod in self.diskmod:
2703 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2704 if ext_provider is not None:
2705 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2706 " instances of type '%s'" %
2707 (constants.IDISK_PROVIDER,
2708 constants.DT_EXT),
2709 errors.ECODE_INVAL)
2712 if self.op.os_name and not self.op.force:
2713 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2714 self.op.force_variant)
2715 instance_os = self.op.os_name
2716 else:
2717 instance_os = instance.os
2719 assert not (self.op.disk_template and self.op.disks), \
2720 "Can't modify disk template and apply disk changes at the same time"
2722 if self.op.disk_template:
2723 self._PreCheckDiskTemplate(pnode_info)
2725 # hvparams processing
2726 if self.op.hvparams:
2727 hv_type = instance.hypervisor
2728 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2729 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2730 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2733 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2734 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2735 self.hv_proposed = self.hv_new = hv_new # the new actual values
2736 self.hv_inst = i_hvdict # the new dict (without defaults)
2737 else:
2738 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2739 instance.hvparams)
2740 self.hv_new = self.hv_inst = {}
2742 # beparams processing
2743 if self.op.beparams:
2744 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2745 use_none=True)
2746 objects.UpgradeBeParams(i_bedict)
2747 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2748 be_new = cluster.SimpleFillBE(i_bedict)
2749 self.be_proposed = self.be_new = be_new # the new actual values
2750 self.be_inst = i_bedict # the new dict (without defaults)
2751 else:
2752 self.be_new = self.be_inst = {}
2753 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2754 be_old = cluster.FillBE(instance)
2756 # CPU param validation -- checking every time a parameter is
2757 # changed to cover all cases where either CPU mask or vcpus have
2758 # been changed
2759 if (constants.BE_VCPUS in self.be_proposed and
2760 constants.HV_CPU_MASK in self.hv_proposed):
2761 cpu_list = \
2762 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2763 # Verify mask is consistent with number of vCPUs. Can skip this
2764 # test if only 1 entry in the CPU mask, which means same mask
2765 # is applied to all vCPUs.
2766 if (len(cpu_list) > 1 and
2767 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2768 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2769 " CPU mask [%s]" %
2770 (self.be_proposed[constants.BE_VCPUS],
2771 self.hv_proposed[constants.HV_CPU_MASK]),
2772 errors.ECODE_INVAL)
2774 # Only perform this test if a new CPU mask is given
2775 if constants.HV_CPU_MASK in self.hv_new:
2776 # Calculate the largest CPU number requested
2777 max_requested_cpu = max(map(max, cpu_list))
2778 # Check that all of the instance's nodes have enough physical CPUs to
2779 # satisfy the requested CPU mask
2780 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2781 max_requested_cpu + 1, instance.hypervisor)
2783 # osparams processing
2784 if self.op.osparams:
2785 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2786 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2787 self.os_inst = i_osdict # the new dict (without defaults)
2791 #TODO(dynmem): do the appropriate check involving MINMEM
2792 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2793 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2794 mem_check_list = [pnode]
2795 if be_new[constants.BE_AUTO_BALANCE]:
2796 # either we changed auto_balance to yes or it was from before
2797 mem_check_list.extend(instance.secondary_nodes)
2798 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2799 instance.hypervisor)
2800 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2801 [instance.hypervisor], False)
2802 pninfo = nodeinfo[pnode]
2803 msg = pninfo.fail_msg
2804 if msg:
2805 # Assume the primary node is unreachable and go ahead
2806 self.warn.append("Can't get info from primary node %s: %s" %
2807 (pnode, msg))
2808 else:
2809 (_, _, (pnhvinfo, )) = pninfo.payload
2810 if not isinstance(pnhvinfo.get("memory_free", None), int):
2811 self.warn.append("Node data from primary node %s doesn't contain"
2812 " free memory information" % pnode)
2813 elif instance_info.fail_msg:
2814 self.warn.append("Can't get instance runtime information: %s" %
2815 instance_info.fail_msg)
2816 else:
2817 if instance_info.payload:
2818 current_mem = int(instance_info.payload["memory"])
2819 else:
2820 # Assume instance not running
2821 # (there is a slight race condition here, but it's not very
2822 # probable, and we have no other way to check)
2823 # TODO: Describe race condition
2824 current_mem = 0
2825 #TODO(dynmem): do the appropriate check involving MINMEM
2826 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2827 pnhvinfo["memory_free"])
2828 if miss_mem > 0:
2829 raise errors.OpPrereqError("This change will prevent the instance"
2830 " from starting, due to %d MB of memory"
2831 " missing on its primary node" %
2832 miss_mem, errors.ECODE_NORES)
2834 if be_new[constants.BE_AUTO_BALANCE]:
2835 for node, nres in nodeinfo.items():
2836 if node not in instance.secondary_nodes:
2837 continue
2838 nres.Raise("Can't get info from secondary node %s" % node,
2839 prereq=True, ecode=errors.ECODE_STATE)
2840 (_, _, (nhvinfo, )) = nres.payload
2841 if not isinstance(nhvinfo.get("memory_free", None), int):
2842 raise errors.OpPrereqError("Secondary node %s didn't return free"
2843 " memory information" % node,
2844 errors.ECODE_STATE)
2845 #TODO(dynmem): do the appropriate check involving MINMEM
2846 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2847 raise errors.OpPrereqError("This change will prevent the instance"
2848 " from failover to its secondary node"
2849 " %s, due to not enough memory" % node,
2850 errors.ECODE_STATE)
2852 if self.op.runtime_mem:
2853 remote_info = self.rpc.call_instance_info(instance.primary_node,
2854 instance.name,
2855 instance.hypervisor)
2856 remote_info.Raise("Error checking node %s" % instance.primary_node)
2857 if not remote_info.payload: # not running already
2858 raise errors.OpPrereqError("Instance %s is not running" %
2859 instance.name, errors.ECODE_STATE)
2861 current_memory = remote_info.payload["memory"]
2862 if (not self.op.force and
2863 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2864 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2865 raise errors.OpPrereqError("Instance %s must have memory between %d"
2866 " and %d MB unless --force is given" %
2867 (instance.name,
2869 self.be_proposed[constants.BE_MINMEM],
2870 self.be_proposed[constants.BE_MAXMEM]),
2871 errors.ECODE_INVAL)
2873 delta = self.op.runtime_mem - current_memory
2874 if delta > 0:
2875 CheckNodeFreeMemory(self, instance.primary_node,
2876 "ballooning memory for instance %s" %
2877 instance.name, delta, instance.hypervisor)
2879 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2880 raise errors.OpPrereqError("Disk operations not supported for"
2881 " diskless instances", errors.ECODE_INVAL)
2883 def _PrepareNicCreate(_, params, private):
2884 self._PrepareNicModification(params, private, None, None,
2885 None, cluster, pnode)
2888 def _PrepareNicMod(_, nic, params, private):
2889 self._PrepareNicModification(params, private, nic.ip, nic.network,
2890 nic.nicparams, cluster, pnode)
2893 def _PrepareNicRemove(_, params, __):
2894 ip = params.ip
2895 net = params.network
2896 if net is not None and ip is not None:
2897 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2899 # Verify NIC changes (operating on copy)
2900 nics = instance.nics[:]
2901 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2902 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2903 if len(nics) > constants.MAX_NICS:
2904 raise errors.OpPrereqError("Instance has too many network interfaces"
2905 " (%d), cannot add more" % constants.MAX_NICS,
2906 errors.ECODE_STATE)
2908 def _PrepareDiskMod(_, disk, params, __):
2909 disk.name = params.get(constants.IDISK_NAME, None)
2911 # Verify disk changes (operating on a copy)
2912 disks = copy.deepcopy(instance.disks)
2913 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2914 _PrepareDiskMod, None)
2915 utils.ValidateDeviceNames("disk", disks)
2916 if len(disks) > constants.MAX_DISKS:
2917 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2918 " more" % constants.MAX_DISKS,
2919 errors.ECODE_STATE)
2920 disk_sizes = [disk.size for disk in instance.disks]
2921 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2922 self.diskmod if op == constants.DDM_ADD)
2923 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2924 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2926 if self.op.offline is not None and self.op.offline:
2927 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2928 msg="can't change to offline")
2930 # Pre-compute NIC changes (necessary to use result in hooks)
2931 self._nic_chgdesc = []
2932 if self.nicmod:
2933 # Operate on copies as this is still in prereq
2934 nics = [nic.Copy() for nic in instance.nics]
2935 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2936 self._CreateNewNic, self._ApplyNicMods,
2937 self._RemoveNic)
2938 # Verify that NIC names are unique and valid
2939 utils.ValidateDeviceNames("NIC", nics)
2940 self._new_nics = nics
2941 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2942 else:
2943 self._new_nics = None
2944 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2946 if not self.op.ignore_ipolicy:
2947 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2948 group_info)
2950 # Fill ispec with backend parameters
2951 ispec[constants.ISPEC_SPINDLE_USE] = \
2952 self.be_new.get(constants.BE_SPINDLE_USE, None)
2953 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2954 None)
2956 # Copy ispec to verify parameters with min/max values separately
2957 if self.op.disk_template:
2958 new_disk_template = self.op.disk_template
2960 new_disk_template = instance.disk_template
2961 ispec_max = ispec.copy()
2962 ispec_max[constants.ISPEC_MEM_SIZE] = \
2963 self.be_new.get(constants.BE_MAXMEM, None)
2964 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2965 new_disk_template)
2966 ispec_min = ispec.copy()
2967 ispec_min[constants.ISPEC_MEM_SIZE] = \
2968 self.be_new.get(constants.BE_MINMEM, None)
2969 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2970 new_disk_template)
2972 if (res_max or res_min):
2973 # FIXME: Improve error message by including information about whether
2974 # the upper or lower limit of the parameter fails the ipolicy.
2975 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2976 (group_info, group_info.name,
2977 utils.CommaJoin(set(res_max + res_min))))
2978 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2980 def _ConvertPlainToDrbd(self, feedback_fn):
2981 """Converts an instance from plain to drbd.
2984 feedback_fn("Converting template to drbd")
2985 instance = self.instance
2986 pnode = instance.primary_node
2987 snode = self.op.remote_node
2989 assert instance.disk_template == constants.DT_PLAIN
2991 # create a fake disk info for _GenerateDiskTemplate
2992 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2993 constants.IDISK_VG: d.logical_id[0],
2994 constants.IDISK_NAME: d.name}
2995 for d in instance.disks]
2996 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2997 instance.name, pnode, [snode],
2998 disk_info, None, None, 0, feedback_fn,
2999 self.diskparams)
3000 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3001 self.diskparams)
3002 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3003 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3004 info = GetInstanceInfoText(instance)
3005 feedback_fn("Creating additional volumes...")
3006 # first, create the missing data and meta devices
3007 for disk in anno_disks:
3008 # unfortunately this is... not too nice
3009 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3010 info, True, p_excl_stor)
3011 for child in disk.children:
3012 CreateSingleBlockDev(self, snode, instance, child, info, True,
3013 s_excl_stor)
3014 # at this stage, all new LVs have been created, we can rename the
3015 # old ones
3016 feedback_fn("Renaming original volumes...")
3017 rename_list = [(o, n.children[0].logical_id)
3018 for (o, n) in zip(instance.disks, new_disks)]
3019 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3020 result.Raise("Failed to rename original LVs")
3022 feedback_fn("Initializing DRBD devices...")
3023 # all child devices are in place, we can now create the DRBD devices
3024 try:
3025 for disk in anno_disks:
3026 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3027 f_create = node == pnode
3028 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3029 excl_stor)
3030 except errors.GenericError, e:
3031 feedback_fn("Initializing of DRBD devices failed;"
3032 " renaming back original volumes...")
3033 for disk in new_disks:
3034 self.cfg.SetDiskID(disk, pnode)
3035 rename_back_list = [(n.children[0], o.logical_id)
3036 for (n, o) in zip(new_disks, instance.disks)]
3037 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3038 result.Raise("Failed to rename LVs back after error %s" % str(e))
3039 raise
3041 # at this point, the instance has been modified
3042 instance.disk_template = constants.DT_DRBD8
3043 instance.disks = new_disks
3044 self.cfg.Update(instance, feedback_fn)
3046 # Release node locks while waiting for sync
3047 ReleaseLocks(self, locking.LEVEL_NODE)
3049 # disks are created, waiting for sync
3050 disk_abort = not WaitForSync(self, instance,
3051 oneshot=not self.op.wait_for_sync)
3052 if disk_abort:
3053 raise errors.OpExecError("There are some degraded disks for"
3054 " this instance, please cleanup manually")
3054 " this instance, please cleanup manually")
3056 # Node resource locks will be released by caller
3058 def _ConvertDrbdToPlain(self, feedback_fn):
3059 """Converts an instance from drbd to plain.
3062 instance = self.instance
3064 assert len(instance.secondary_nodes) == 1
3065 assert instance.disk_template == constants.DT_DRBD8
3067 pnode = instance.primary_node
3068 snode = instance.secondary_nodes[0]
3069 feedback_fn("Converting template to plain")
3071 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3072 new_disks = [d.children[0] for d in instance.disks]
3074 # copy over size, mode and name
3075 for parent, child in zip(old_disks, new_disks):
3076 child.size = parent.size
3077 child.mode = parent.mode
3078 child.name = parent.name
3080 # this is a DRBD disk, return its port to the pool
3081 # NOTE: this must be done right before the call to cfg.Update!
3082 for disk in old_disks:
3083 tcp_port = disk.logical_id[2]
3084 self.cfg.AddTcpUdpPort(tcp_port)
3086 # update instance structure
3087 instance.disks = new_disks
3088 instance.disk_template = constants.DT_PLAIN
3089 _UpdateIvNames(0, instance.disks)
3090 self.cfg.Update(instance, feedback_fn)
3092 # Release locks in case removing disks takes a while
3093 ReleaseLocks(self, locking.LEVEL_NODE)
3095 feedback_fn("Removing volumes on the secondary node...")
3096 for disk in old_disks:
3097 self.cfg.SetDiskID(disk, snode)
3098 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3099 if msg:
3100 self.LogWarning("Could not remove block device %s on node %s,"
3101 " continuing anyway: %s", disk.iv_name, snode, msg)
3103 feedback_fn("Removing unneeded volumes on the primary node...")
3104 for idx, disk in enumerate(old_disks):
3105 meta = disk.children[1]
3106 self.cfg.SetDiskID(meta, pnode)
3107 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3108 if msg:
3109 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3110 " continuing anyway: %s", idx, pnode, msg)
3112 def _HotplugDevice(self, action, dev_type, device, extra, seq):
3113 self.LogInfo("Trying to hotplug device...")
3114 result = self.rpc.call_hotplug_device(self.instance.primary_node,
3115 self.instance, action, dev_type,
3116 (device, self.instance),
3117 extra, seq)
3118 if result.fail_msg:
3119 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3120 self.LogInfo("Continuing execution...")
3121 else:
3122 self.LogInfo("Hotplug done.")
3124 def _CreateNewDisk(self, idx, params, _):
3125 """Creates a new disk.
3128 instance = self.instance
3131 if instance.disk_template in constants.DTS_FILEBASED:
3132 (file_driver, file_path) = instance.disks[0].logical_id
3133 file_path = os.path.dirname(file_path)
3135 file_driver = file_path = None
3137 disk = \
3138 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3139 instance.primary_node, instance.secondary_nodes,
3140 [params], file_path, file_driver, idx,
3141 self.Log, self.diskparams)[0]
3143 new_disks = CreateDisks(self, instance, disks=[disk])
3145 if self.cluster.prealloc_wipe_disks:
3147 WipeOrCleanupDisks(self, instance,
3148 disks=[(idx, disk, 0)],
3149 cleanup=new_disks)
3152 # _, device_info = AssembleInstanceDisks(self, self.instance,
3153 # [disk], check=False)
3151 if self.op.hotplug:
3154 self.cfg.SetDiskID(disk, self.instance.primary_node)
3155 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3156 (disk, self.instance),
3157 self.instance.name, True, idx)
3158 if result.fail_msg:
3159 self.LogWarning("Can't assemble newly created disk %d: %s",
3160 idx, result.fail_msg)
3161 else:
3162 # _, _, dev_path = device_info[0]
3163 _, link_name = result.payload
3164 self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3165 constants.HOTPLUG_TARGET_DISK,
3166 disk, link_name, idx)
3168 return (disk, [
3169 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3170 ])
3172 @staticmethod
3173 def _ModifyDisk(idx, disk, params, _):
3174 """Modifies a disk.
3176 """
3177 changes = []
3178 mode = params.get(constants.IDISK_MODE, None)
3179 if mode:
3180 disk.mode = mode
3181 changes.append(("disk.mode/%d" % idx, disk.mode))
3183 name = params.get(constants.IDISK_NAME, None)
3184 disk.name = name
3185 changes.append(("disk.name/%d" % idx, disk.name))
3187 return changes
3189 def _RemoveDisk(self, idx, root, _):
3190 """Removes a disk.
3192 """
3193 if self.op.hotplug:
3194 self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3195 constants.HOTPLUG_TARGET_DISK,
3196 root, None, idx)
3197 ShutdownInstanceDisks(self, self.instance, [root])
3199 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3200 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3201 self.cfg.SetDiskID(disk, node)
3202 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3203 if msg:
3204 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3205 " continuing anyway", idx, node, msg)
3207 # if this is a DRBD disk, return its port to the pool
3208 if root.dev_type in constants.LDS_DRBD:
3209 self.cfg.AddTcpUdpPort(root.logical_id[2])
3211 def _CreateNewNic(self, idx, params, private):
3212 """Creates data structure for a new network interface.
3215 mac = params[constants.INIC_MAC]
3216 ip = params.get(constants.INIC_IP, None)
3217 net = params.get(constants.INIC_NETWORK, None)
3218 name = params.get(constants.INIC_NAME, None)
3219 net_uuid = self.cfg.LookupNetwork(net)
3220 #TODO: not private.filled?? can a nic have no nicparams??
3221 nicparams = private.filled
3222 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3223 nicparams=nicparams)
3224 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3226 if self.op.hotplug:
3227 self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3228 constants.HOTPLUG_TARGET_NIC,
3229 nobj, None, idx)
3231 return (nobj, [
3232 ("nic.%d" % idx,
3233 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3234 (mac, ip, private.filled[constants.NIC_MODE],
3235 private.filled[constants.NIC_LINK], net)),
3236 ])
3240 def _ApplyNicMods(self, idx, nic, params, private):
3241 """Modifies a network interface.
3244 changes = []
3246 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3247 if key in params:
3248 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3249 setattr(nic, key, params[key])
3251 new_net = params.get(constants.INIC_NETWORK, nic.network)
3252 new_net_uuid = self.cfg.LookupNetwork(new_net)
3253 if new_net_uuid != nic.network:
3254 changes.append(("nic.network/%d" % idx, new_net))
3255 nic.network = new_net_uuid
3257 if private.filled:
3258 nic.nicparams = private.filled
3260 for (key, val) in nic.nicparams.items():
3261 changes.append(("nic.%s/%d" % (key, idx), val))
3263 if self.op.hotplug:
3264 self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3265 constants.HOTPLUG_TARGET_NIC,
3266 nic, None, idx)
3268 return changes
3270 def _RemoveNic(self, idx, nic, _):
3271 if self.op.hotplug:
3272 self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3273 constants.HOTPLUG_TARGET_NIC,
3274 nic, None, idx)
3276 def Exec(self, feedback_fn):
3277 """Modifies an instance.
3279 All parameters take effect only at the next restart of the instance.
3282 # Process here the warnings from CheckPrereq, as we don't have a
3283 # feedback_fn there.
3284 # TODO: Replace with self.LogWarning
3285 for warn in self.warn:
3286 feedback_fn("WARNING: %s" % warn)
3288 assert ((self.op.disk_template is None) ^
3289 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3290 "Not owning any node resource locks"
3292 result = []
3293 instance = self.instance
3296 if self.op.pnode:
3297 instance.primary_node = self.op.pnode
3300 if self.op.runtime_mem:
3301 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3302 instance,
3303 self.op.runtime_mem)
3304 rpcres.Raise("Cannot modify instance runtime memory")
3305 result.append(("runtime_memory", self.op.runtime_mem))
3307 # Apply disk changes
3308 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3309 self._CreateNewDisk, self._ModifyDisk,
3310 self._RemoveDisk)
3311 _UpdateIvNames(0, instance.disks)
3313 if self.op.disk_template:
3314 if __debug__:
3315 check_nodes = set(instance.all_nodes)
3316 if self.op.remote_node:
3317 check_nodes.add(self.op.remote_node)
3318 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3319 owned = self.owned_locks(level)
3320 assert not (check_nodes - owned), \
3321 ("Not owning the correct locks, owning %r, expected at least %r" %
3322 (owned, check_nodes))
3324 r_shut = ShutdownInstanceDisks(self, instance)
3325 if not r_shut:
3326 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3327 " proceed with disk template conversion")
3328 mode = (instance.disk_template, self.op.disk_template)
3329 try:
3330 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3331 except:
3332 self.cfg.ReleaseDRBDMinors(instance.name)
3333 raise
3334 result.append(("disk_template", self.op.disk_template))
3336 assert instance.disk_template == self.op.disk_template, \
3337 ("Expected disk template '%s', found '%s'" %
3338 (self.op.disk_template, instance.disk_template))
3340 # Release node and resource locks if there are any (they might already have
3341 # been released during disk conversion)
3342 ReleaseLocks(self, locking.LEVEL_NODE)
3343 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3346 if self._new_nics is not None:
3347 instance.nics = self._new_nics
3348 result.extend(self._nic_chgdesc)
3351 if self.op.hvparams:
3352 instance.hvparams = self.hv_inst
3353 for key, val in self.op.hvparams.iteritems():
3354 result.append(("hv/%s" % key, val))
3357 if self.op.beparams:
3358 instance.beparams = self.be_inst
3359 for key, val in self.op.beparams.iteritems():
3360 result.append(("be/%s" % key, val))
3363 if self.op.os_name:
3364 instance.os = self.op.os_name
3367 if self.op.osparams:
3368 instance.osparams = self.os_inst
3369 for key, val in self.op.osparams.iteritems():
3370 result.append(("os/%s" % key, val))
3372 if self.op.offline is None:
3373 # Ignore
3374 pass
3375 elif self.op.offline:
3376 # Mark instance as offline
3377 self.cfg.MarkInstanceOffline(instance.name)
3378 result.append(("admin_state", constants.ADMINST_OFFLINE))
3380 # Mark instance as online, but stopped
3381 self.cfg.MarkInstanceDown(instance.name)
3382 result.append(("admin_state", constants.ADMINST_DOWN))
3384 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3386 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3387 self.owned_locks(locking.LEVEL_NODE)), \
3388 "All node locks should have been released by now"
3390 return result
3392 _DISK_CONVERSIONS = {
3393 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3394 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3395 }
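# Illustrative usage (names assumed): Exec looks up the conversion helper
# via mode = (instance.disk_template, self.op.disk_template), so a
# plain->drbd conversion dispatches to _ConvertPlainToDrbd above. On the
# command line such a conversion is typically requested as:
#
#   gnt-instance modify -t drbd -n node2.example.com inst1.example.com
#
# where -n names the new secondary node that mirrored templates require.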
3398 class LUInstanceChangeGroup(LogicalUnit):
3399 HPATH = "instance-change-group"
3400 HTYPE = constants.HTYPE_INSTANCE
3403 def ExpandNames(self):
3404 self.share_locks = ShareAll()
3406 self.needed_locks = {
3407 locking.LEVEL_NODEGROUP: [],
3408 locking.LEVEL_NODE: [],
3409 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3410 }
3412 self._ExpandAndLockInstance()
3414 if self.op.target_groups:
3415 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3416 self.op.target_groups)
3417 else:
3418 self.req_target_uuids = None
3420 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3422 def DeclareLocks(self, level):
3423 if level == locking.LEVEL_NODEGROUP:
3424 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3426 if self.req_target_uuids:
3427 lock_groups = set(self.req_target_uuids)
3429 # Lock all groups used by instance optimistically; this requires going
3430 # via the node before it's locked, requiring verification later on
3431 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3432 lock_groups.update(instance_groups)
3434 # No target groups, need to lock all of them
3435 lock_groups = locking.ALL_SET
3437 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3439 elif level == locking.LEVEL_NODE:
3440 if self.req_target_uuids:
3441 # Lock all nodes used by instances
3442 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3443 self._LockInstancesNodes()
3445 # Lock all nodes in all potential target groups
3446 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3447 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3448 member_nodes = [node_name
3449 for group in lock_groups
3450 for node_name in self.cfg.GetNodeGroup(group).members]
3451 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3452 else:
3453 # Lock all nodes as all groups are potential targets
3454 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3456 def CheckPrereq(self):
3457 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3458 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3459 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3461 assert (self.req_target_uuids is None or
3462 owned_groups.issuperset(self.req_target_uuids))
3463 assert owned_instances == set([self.op.instance_name])
3465 # Get instance information
3466 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3468 # Check if node groups for locked instance are still correct
3469 assert owned_nodes.issuperset(self.instance.all_nodes), \
3470 ("Instance %s's nodes changed while we kept the lock" %
3471 self.op.instance_name)
3473 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3474 owned_groups)
3476 if self.req_target_uuids:
3477 # User requested specific target groups
3478 self.target_uuids = frozenset(self.req_target_uuids)
3479 else:
3480 # All groups except those used by the instance are potential targets
3481 self.target_uuids = owned_groups - inst_groups
3483 conflicting_groups = self.target_uuids & inst_groups
3484 if conflicting_groups:
3485 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3486 " used by the instance '%s'" %
3487 (utils.CommaJoin(conflicting_groups),
3488 self.op.instance_name),
3489 errors.ECODE_INVAL)
3491 if not self.target_uuids:
3492 raise errors.OpPrereqError("There are no possible target groups",
3493 errors.ECODE_INVAL)
3495 def BuildHooksEnv(self):
3499 assert self.target_uuids
3501 env = {
3502 "TARGET_GROUPS": " ".join(self.target_uuids),
3503 }
3505 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3507 return env
3509 def BuildHooksNodes(self):
3510 """Build hooks nodes.
3513 mn = self.cfg.GetMasterNode()
3514 return ([mn], [mn])
3516 def Exec(self, feedback_fn):
3517 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3519 assert instances == [self.op.instance_name], "Instance not locked"
3521 req = iallocator.IAReqGroupChange(instances=instances,
3522 target_groups=list(self.target_uuids))
3523 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3525 ial.Run(self.op.iallocator)
3527 if not ial.success:
3528 raise errors.OpPrereqError("Can't compute solution for changing group of"
3529 " instance '%s' using iallocator '%s': %s" %
3530 (self.op.instance_name, self.op.iallocator,
3531 ial.info), errors.ECODE_NORES)
3533 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3535 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3536 " instance '%s'", len(jobs), self.op.instance_name)
3538 return ResultWithJobs(jobs)
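# Illustrative client-side sketch (values are assumptions): a group change
# is requested via an OpInstanceChangeGroup opcode, e.g.:
#
#   op = opcodes.OpInstanceChangeGroup(instance_name="inst1.example.com",
#                                      target_groups=["group2"])
#
# Leaving target_groups unset makes every group except the instance's
# current ones a potential target, as computed in CheckPrereq above.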