4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
45 from ganeti import ssh
46 from ganeti import utils
47 from ganeti import errors
48 from ganeti import hypervisor
49 from ganeti import locking
50 from ganeti import constants
51 from ganeti import objects
52 from ganeti import serializer
53 from ganeti import ssconf
54 from ganeti import uidpool
55 from ganeti import compat
56 from ganeti import masterd
57 from ganeti import netutils
58 from ganeti import query
59 from ganeti import qlang
60 from ganeti import opcodes
63 import ganeti.masterd.instance # pylint: disable=W0611
67 """Data container for LU results with jobs.
69 Instances of this class returned from L{LogicalUnit.Exec} will be recognized
70 by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
71 contained in the C{jobs} attribute and include the job IDs in the opcode result.
75 def __init__(self, jobs, **kwargs):
76 """Initializes this class.
78 Additional return values can be specified as keyword arguments.
80 @type jobs: list of lists of L{opcodes.OpCode}
81 @param jobs: A list of lists of opcode objects
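For example, L{LUClusterVerify} below builds one verification job per node
group and finishes its Exec with (sketch of the pattern used in this module)::

    return ResultWithJobs(jobs)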
88 class LogicalUnit(object):
89 """Logical Unit base class.
91 Subclasses must follow these rules:
92 - implement ExpandNames
93 - implement CheckPrereq (except when tasklets are used)
94 - implement Exec (except when tasklets are used)
95 - implement BuildHooksEnv
96 - implement BuildHooksNodes
97 - redefine HPATH and HTYPE
98 - optionally redefine their run requirements:
99 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
101 Note that all commands require root permissions.
103 @ivar dry_run_result: the value (if any) that will be returned to the caller
104 in dry-run mode (signalled by opcode dry_run parameter)
111 def __init__(self, processor, op, context, rpc):
112 """Constructor for LogicalUnit.
114 This needs to be overridden in derived classes in order to check op validity.
118 self.proc = processor
120 self.cfg = context.cfg
121 self.glm = context.glm
123 self.owned_locks = context.glm.list_owned
124 self.context = context
126 # Dicts used to declare locking needs to mcpu
127 self.needed_locks = None
128 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
130 self.remove_locks = {}
131 # Used to force good behavior when calling helper functions
132 self.recalculate_locks = {}
134 self.Log = processor.Log # pylint: disable=C0103
135 self.LogWarning = processor.LogWarning # pylint: disable=C0103
136 self.LogInfo = processor.LogInfo # pylint: disable=C0103
137 self.LogStep = processor.LogStep # pylint: disable=C0103
138 # support for dry-run
139 self.dry_run_result = None
140 # support for generic debug attribute
141 if (not hasattr(self.op, "debug_level") or
142 not isinstance(self.op.debug_level, int)):
143 self.op.debug_level = 0
148 # Validate opcode parameters and set defaults
149 self.op.Validate(True)
151 self.CheckArguments()
153 def CheckArguments(self):
154 """Check syntactic validity for the opcode arguments.
156 This method is for doing a simple syntactic check and ensuring the
157 validity of opcode parameters, without any cluster-related
158 checks. While the same can be accomplished in ExpandNames and/or
159 CheckPrereq, doing these separately is better because:
161 - ExpandNames is left as purely a lock-related function
162 - CheckPrereq is run after we have acquired locks (and possibly
165 The function is allowed to change the self.op attribute so that
166 later methods no longer have to worry about missing parameters.
171 def ExpandNames(self):
172 """Expand names for this LU.
174 This method is called before starting to execute the opcode, and it should
175 update all the parameters of the opcode to their canonical form (e.g. a
176 short node name must be fully expanded after this method has successfully
177 completed). This way locking, hooks, logging, etc. can work correctly.
179 LUs which implement this method must also populate the self.needed_locks
180 member, as a dict with lock levels as keys, and a list of needed lock names
183 - use an empty dict if you don't need any lock
184 - if you don't need any lock at a particular level omit that level
185 - don't put anything for the BGL level
186 - if you want all locks at a level use locking.ALL_SET as a value
188 If you need to share locks (rather than acquire them exclusively) at one
189 level you can modify self.share_locks, setting a true value (usually 1) for
190 that level. By default locks are not shared.
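For example, a LU that only needs read access to node data could request
shared node locks with something like (illustrative sketch)::

    # share node locks instead of acquiring them exclusively
    self.share_locks[locking.LEVEL_NODE] = 1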
192 This function can also define a list of tasklets, which then will be
193 executed in order instead of the usual LU-level CheckPrereq and Exec
194 functions, if those are not defined by the LU.
198 # Acquire all nodes and one instance
199 self.needed_locks = {
200 locking.LEVEL_NODE: locking.ALL_SET,
201 locking.LEVEL_INSTANCE: ['instance1.example.com'],
203 # Acquire just two nodes
204 self.needed_locks = {
205 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
208 self.needed_locks = {} # No, you can't leave it to the default value None
211 # The implementation of this method is mandatory only if the new LU is
212 # concurrent, so that old LUs don't need to be changed all at the same time.
215 self.needed_locks = {} # Exclusive LUs don't need locks.
217 raise NotImplementedError
219 def DeclareLocks(self, level):
220 """Declare LU locking needs for a level
222 While most LUs can just declare their locking needs at ExpandNames time,
223 sometimes there's the need to calculate some locks after having acquired
224 the ones before. This function is called just before acquiring locks at a
225 particular level, but after acquiring the ones at lower levels, and permits
226 such calculations. It can be used to modify self.needed_locks, and by
227 default it does nothing.
229 This function is only called if you have something already set in
230 self.needed_locks for the level.
232 @param level: Locking level which is going to be locked
233 @type level: member of ganeti.locking.LEVELS
237 def CheckPrereq(self):
238 """Check prerequisites for this LU.
240 This method should check that the prerequisites for the execution
241 of this LU are fulfilled. It can do internode communication, but
242 it should be idempotent - no cluster or system changes are allowed.
245 The method should raise errors.OpPrereqError in case something is
246 not fulfilled. Its return value is ignored.
248 This method should also update all the parameters of the opcode to
249 their canonical form if it hasn't been done by ExpandNames before.
252 if self.tasklets is not None:
253 for (idx, tl) in enumerate(self.tasklets):
254 logging.debug("Checking prerequisites for tasklet %s/%s",
255 idx + 1, len(self.tasklets))
260 def Exec(self, feedback_fn):
263 This method should implement the actual work. It should raise
264 errors.OpExecError for failures that are somewhat dealt with in code, or expected.
268 if self.tasklets is not None:
269 for (idx, tl) in enumerate(self.tasklets):
270 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
273 raise NotImplementedError
275 def BuildHooksEnv(self):
276 """Build hooks environment for this LU.
279 @return: Dictionary containing the environment that will be used for
280 running the hooks for this LU. The keys of the dict must not be prefixed
281 with "GANETI_"--that'll be added by the hooks runner. The hooks runner
282 will extend the environment with additional variables. If no environment
283 should be defined, an empty dictionary should be returned (not C{None}).
284 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
288 raise NotImplementedError
290 def BuildHooksNodes(self):
291 """Build list of nodes to run LU's hooks.
293 @rtype: tuple; (list, list)
294 @return: Tuple containing a list of node names on which the hook
295 should run before the execution and a list of node names on which the
296 hook should run after the execution. "No nodes" should be returned as an
297 empty list (and not None).
298 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
302 raise NotImplementedError
304 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
305 """Notify the LU about the results of its hooks.
307 This method is called every time a hooks phase is executed, and notifies
308 the Logical Unit about the hooks' result. The LU can then use it to alter
309 its result based on the hooks. By default the method does nothing and the
310 previous result is passed back unchanged but any LU can define it if it
311 wants to use the local cluster hook-scripts somehow.
313 @param phase: one of L{constants.HOOKS_PHASE_POST} or
314 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
315 @param hook_results: the results of the multi-node hooks rpc call
316 @param feedback_fn: function used to send feedback back to the caller
317 @param lu_result: the previous Exec result this LU had, or None
319 @return: the new Exec result, based on the previous result
323 # API must be kept, thus we ignore the 'unused argument' and
324 # 'could be a function' warnings
325 # pylint: disable=W0613,R0201
328 def _ExpandAndLockInstance(self):
329 """Helper function to expand and lock an instance.
331 Many LUs that work on an instance take its name in self.op.instance_name
332 and need to expand it and then declare the expanded name for locking. This
333 function does it, and then updates self.op.instance_name to the expanded
334 name. It also initializes needed_locks as a dict, if this hasn't been done before.
338 if self.needed_locks is None:
339 self.needed_locks = {}
341 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
342 "_ExpandAndLockInstance called with instance-level locks set"
343 self.op.instance_name = _ExpandInstanceName(self.cfg,
344 self.op.instance_name)
345 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
347 def _LockInstancesNodes(self, primary_only=False):
348 """Helper function to declare instances' nodes for locking.
350 This function should be called after locking one or more instances to lock
351 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
352 with all primary or secondary nodes for instances already locked and
353 present in self.needed_locks[locking.LEVEL_INSTANCE].
355 It should be called from DeclareLocks, and for safety only works if
356 self.recalculate_locks[locking.LEVEL_NODE] is set.
358 In the future it may grow parameters to just lock some instance's nodes, or
359 to just lock primaries or secondary nodes, if needed.
361 It should be called in DeclareLocks in a way similar to::
363 if level == locking.LEVEL_NODE:
364 self._LockInstancesNodes()
366 @type primary_only: boolean
367 @param primary_only: only lock primary nodes of locked instances
370 assert locking.LEVEL_NODE in self.recalculate_locks, \
371 "_LockInstancesNodes helper function called with no nodes to recalculate"
373 # TODO: check if we've really been called with the instance locks held
375 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
376 # future we might want to have different behaviors depending on the value
377 # of self.recalculate_locks[locking.LEVEL_NODE]
379 locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
380 for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
381 wanted_nodes.append(instance.primary_node)
383 wanted_nodes.extend(instance.secondary_nodes)
385 if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
386 self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
387 elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
388 self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
390 del self.recalculate_locks[locking.LEVEL_NODE]
393 class NoHooksLU(LogicalUnit): # pylint: disable=W0223
394 """Simple LU which runs no hooks.
396 This LU is intended as a parent for other LogicalUnits which will
397 run no hooks, in order to reduce duplicate code.
403 def BuildHooksEnv(self):
404 """Empty BuildHooksEnv for NoHooksLu.
406 This just raises an error.
409 raise AssertionError("BuildHooksEnv called for NoHooksLUs")
411 def BuildHooksNodes(self):
412 """Empty BuildHooksNodes for NoHooksLU.
415 raise AssertionError("BuildHooksNodes called for NoHooksLU")
419 """Tasklet base class.
421 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
422 they can mix legacy code with tasklets. Locking needs to be done in the LU,
423 tasklets know nothing about locks.
425 Subclasses must follow these rules:
426 - Implement CheckPrereq
430 def __init__(self, lu):
437 def CheckPrereq(self):
438 """Check prerequisites for this tasklets.
440 This method should check whether the prerequisites for the execution of
441 this tasklet are fulfilled. It can do internode communication, but it
442 should be idempotent - no cluster or system changes are allowed.
444 The method should raise errors.OpPrereqError in case something is not
445 fulfilled. Its return value is ignored.
447 This method should also update all parameters to their canonical form if it
448 hasn't been done before.
453 def Exec(self, feedback_fn):
454 """Execute the tasklet.
456 This method should implement the actual work. It should raise
457 errors.OpExecError for failures that are somewhat dealt with in code, or
461 raise NotImplementedError
465 """Base for query utility classes.
468 #: Attribute holding field definitions
471 def __init__(self, filter_, fields, use_locking):
472 """Initializes this class.
475 self.use_locking = use_locking
477 self.query = query.Query(self.FIELDS, fields, filter_=filter_,
479 self.requested_data = self.query.RequestedData()
480 self.names = self.query.RequestedNames()
482 # Sort only if no names were requested
483 self.sort_by_name = not self.names
485 self.do_locking = None
488 def _GetNames(self, lu, all_names, lock_level):
489 """Helper function to determine names asked for in the query.
493 names = lu.owned_locks(lock_level)
497 if self.wanted == locking.ALL_SET:
498 assert not self.names
499 # caller didn't specify names, so ordering is not important
500 return utils.NiceSort(names)
502 # caller specified names and we must keep the same order
504 assert not self.do_locking or lu.glm.is_owned(lock_level)
506 missing = set(self.wanted).difference(names)
508 raise errors.OpExecError("Some items were removed before retrieving"
509 " their data: %s" % missing)
511 # Return expanded names
514 def ExpandNames(self, lu):
515 """Expand names for this query.
517 See L{LogicalUnit.ExpandNames}.
520 raise NotImplementedError()
522 def DeclareLocks(self, lu, level):
523 """Declare locks for this query.
525 See L{LogicalUnit.DeclareLocks}.
528 raise NotImplementedError()
530 def _GetQueryData(self, lu):
531 """Collects all data for this query.
533 @return: Query data object
536 raise NotImplementedError()
538 def NewStyleQuery(self, lu):
539 """Collect data and execute query.
542 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
543 sort_by_name=self.sort_by_name)
545 def OldStyleQuery(self, lu):
546 """Collect data and execute query.
549 return self.query.OldStyleQuery(self._GetQueryData(lu),
550 sort_by_name=self.sort_by_name)
554 """Returns a dict declaring all lock levels shared.
557 return dict.fromkeys(locking.LEVELS, 1)
560 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
561 """Checks if the owned node groups are still correct for an instance.
563 @type cfg: L{config.ConfigWriter}
564 @param cfg: The cluster configuration
565 @type instance_name: string
566 @param instance_name: Instance name
567 @type owned_groups: set or frozenset
568 @param owned_groups: List of currently owned node groups
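A typical caller invokes it from CheckPrereq after acquiring the node group
locks, along the lines of (sketch; variable names are illustrative)::

    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)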
571 inst_groups = cfg.GetInstanceNodeGroups(instance_name)
573 if not owned_groups.issuperset(inst_groups):
574 raise errors.OpPrereqError("Instance %s's node groups changed since"
575 " locks were acquired, current groups are"
576 " are '%s', owning groups '%s'; retry the"
579 utils.CommaJoin(inst_groups),
580 utils.CommaJoin(owned_groups)),
586 def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
587 """Checks if the instances in a node group are still correct.
589 @type cfg: L{config.ConfigWriter}
590 @param cfg: The cluster configuration
591 @type group_uuid: string
592 @param group_uuid: Node group UUID
593 @type owned_instances: set or frozenset
594 @param owned_instances: List of currently owned instances
597 wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
598 if owned_instances != wanted_instances:
599 raise errors.OpPrereqError("Instances in node group '%s' changed since"
600 " locks were acquired, wanted '%s', have '%s';"
601 " retry the operation" %
603 utils.CommaJoin(wanted_instances),
604 utils.CommaJoin(owned_instances)),
607 return wanted_instances
610 def _SupportsOob(cfg, node):
611 """Tells if node supports OOB.
613 @type cfg: L{config.ConfigWriter}
614 @param cfg: The cluster configuration
615 @type node: L{objects.Node}
616 @param node: The node
617 @return: The OOB script if supported or an empty string otherwise
620 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
623 def _GetWantedNodes(lu, nodes):
624 """Returns list of checked and expanded node names.
626 @type lu: L{LogicalUnit}
627 @param lu: the logical unit on whose behalf we execute
629 @param nodes: list of node names or None for all nodes
631 @return: the list of nodes, sorted
632 @raise errors.ProgrammerError: if the nodes parameter is wrong type
636 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
638 return utils.NiceSort(lu.cfg.GetNodeList())
641 def _GetWantedInstances(lu, instances):
642 """Returns list of checked and expanded instance names.
644 @type lu: L{LogicalUnit}
645 @param lu: the logical unit on whose behalf we execute
646 @type instances: list
647 @param instances: list of instance names or None for all instances
649 @return: the list of instances, sorted
650 @raise errors.OpPrereqError: if the instances parameter is wrong type
651 @raise errors.OpPrereqError: if any of the passed instances is not found
655 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
657 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
661 def _GetUpdatedParams(old_params, update_dict,
662 use_default=True, use_none=False):
663 """Return the new version of a parameter dictionary.
665 @type old_params: dict
666 @param old_params: old parameters
667 @type update_dict: dict
668 @param update_dict: dict containing new parameter values, or
669 constants.VALUE_DEFAULT to reset the parameter to its default
671 @type use_default: boolean
672 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
673 values as 'to be deleted' values
674 @type use_none: boolean
675 @param use_none: whether to recognise C{None} values as 'to be
678 @return: the new parameter dictionary
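For example (illustrative; with the default C{use_default=True})::

    _GetUpdatedParams({"a": 1, "b": 2}, {"a": constants.VALUE_DEFAULT, "c": 3})
    # returns {"b": 2, "c": 3}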
681 params_copy = copy.deepcopy(old_params)
682 for key, val in update_dict.iteritems():
683 if ((use_default and val == constants.VALUE_DEFAULT) or
684 (use_none and val is None)):
690 params_copy[key] = val
694 def _ReleaseLocks(lu, level, names=None, keep=None):
695 """Releases locks owned by an LU.
697 @type lu: L{LogicalUnit}
698 @param level: Lock level
699 @type names: list or None
700 @param names: Names of locks to release
701 @type keep: list or None
702 @param keep: Names of locks to retain
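For example, an LU that wants to keep only the lock on an instance's primary
node could do something like (sketch; C{instance} is illustrative)::

    _ReleaseLocks(self, locking.LEVEL_NODE, keep=[instance.primary_node])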
705 assert not (keep is not None and names is not None), \
706 "Only one of the 'names' and the 'keep' parameters can be given"
708 if names is not None:
709 should_release = names.__contains__
711 should_release = lambda name: name not in keep
713 should_release = None
719 # Determine which locks to release
720 for name in lu.owned_locks(level):
721 if should_release(name):
726 assert len(lu.owned_locks(level)) == (len(retain) + len(release))
728 # Release just some locks
729 lu.glm.release(level, names=release)
731 assert frozenset(lu.owned_locks(level)) == frozenset(retain)
734 lu.glm.release(level)
736 assert not lu.glm.is_owned(level), "No locks should be owned"
739 def _MapInstanceDisksToNodes(instances):
740 """Creates a map from (node, volume) to instance name.
742 @type instances: list of L{objects.Instance}
743 @rtype: dict; tuple of (node name, volume name) as key, instance name as value
746 return dict(((node, vol), inst.name)
747 for inst in instances
748 for (node, vols) in inst.MapLVsByNode().items()
752 def _RunPostHook(lu, node_name):
753 """Runs the post-hook for an opcode on a single node.
756 hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
758 hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
760 # pylint: disable=W0702
761 lu.LogWarning("Errors occurred running hooks on %s" % node_name)
764 def _CheckOutputFields(static, dynamic, selected):
765 """Checks whether all selected fields are valid.
767 @type static: L{utils.FieldSet}
768 @param static: static fields set
769 @type dynamic: L{utils.FieldSet}
770 @param dynamic: dynamic fields set
777 delta = f.NonMatching(selected)
779 raise errors.OpPrereqError("Unknown output fields selected: %s"
780 % ",".join(delta), errors.ECODE_INVAL)
783 def _CheckGlobalHvParams(params):
784 """Validates that given hypervisor params are not global ones.
786 This will ensure that instances don't get customised versions of global parameters.
790 used_globals = constants.HVC_GLOBALS.intersection(params)
792 msg = ("The following hypervisor parameters are global and cannot"
793 " be customized at instance level, please modify them at"
794 " cluster level: %s" % utils.CommaJoin(used_globals))
795 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
798 def _CheckNodeOnline(lu, node, msg=None):
799 """Ensure that a given node is online.
801 @param lu: the LU on behalf of which we make the check
802 @param node: the node to check
803 @param msg: if passed, should be a message to replace the default one
804 @raise errors.OpPrereqError: if the node is offline
808 msg = "Can't use offline node"
809 if lu.cfg.GetNodeInfo(node).offline:
810 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
813 def _CheckNodeNotDrained(lu, node):
814 """Ensure that a given node is not drained.
816 @param lu: the LU on behalf of which we make the check
817 @param node: the node to check
818 @raise errors.OpPrereqError: if the node is drained
821 if lu.cfg.GetNodeInfo(node).drained:
822 raise errors.OpPrereqError("Can't use drained node %s" % node,
826 def _CheckNodeVmCapable(lu, node):
827 """Ensure that a given node is vm capable.
829 @param lu: the LU on behalf of which we make the check
830 @param node: the node to check
831 @raise errors.OpPrereqError: if the node is not vm capable
834 if not lu.cfg.GetNodeInfo(node).vm_capable:
835 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
839 def _CheckNodeHasOS(lu, node, os_name, force_variant):
840 """Ensure that a node supports a given OS.
842 @param lu: the LU on behalf of which we make the check
843 @param node: the node to check
844 @param os_name: the OS to query about
845 @param force_variant: whether to ignore variant errors
846 @raise errors.OpPrereqError: if the node does not support the OS
849 result = lu.rpc.call_os_get(node, os_name)
850 result.Raise("OS '%s' not in supported OS list for node %s" %
852 prereq=True, ecode=errors.ECODE_INVAL)
853 if not force_variant:
854 _CheckOSVariant(result.payload, os_name)
857 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
858 """Ensure that a node has the given secondary ip.
860 @type lu: L{LogicalUnit}
861 @param lu: the LU on behalf of which we make the check
863 @param node: the node to check
864 @type secondary_ip: string
865 @param secondary_ip: the ip to check
866 @type prereq: boolean
867 @param prereq: whether to throw a prerequisite or an execute error
868 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
869 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
872 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
873 result.Raise("Failure checking secondary ip on node %s" % node,
874 prereq=prereq, ecode=errors.ECODE_ENVIRON)
875 if not result.payload:
876 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
877 " please fix and re-run this command" % secondary_ip)
879 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
881 raise errors.OpExecError(msg)
884 def _GetClusterDomainSecret():
885 """Reads the cluster domain secret.
888 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
892 def _CheckInstanceDown(lu, instance, reason):
893 """Ensure that an instance is not running."""
894 if instance.admin_up:
895 raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
896 (instance.name, reason), errors.ECODE_STATE)
898 pnode = instance.primary_node
899 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
900 ins_l.Raise("Can't contact node %s for instance information" % pnode,
901 prereq=True, ecode=errors.ECODE_ENVIRON)
903 if instance.name in ins_l.payload:
904 raise errors.OpPrereqError("Instance %s is running, %s" %
905 (instance.name, reason), errors.ECODE_STATE)
908 def _ExpandItemName(fn, name, kind):
909 """Expand an item name.
911 @param fn: the function to use for expansion
912 @param name: requested item name
913 @param kind: text description ('Node' or 'Instance')
914 @return: the resolved (full) name
915 @raise errors.OpPrereqError: if the item is not found
919 if full_name is None:
920 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
925 def _ExpandNodeName(cfg, name):
926 """Wrapper over L{_ExpandItemName} for nodes."""
927 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
930 def _ExpandInstanceName(cfg, name):
931 """Wrapper over L{_ExpandItemName} for instance."""
932 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
935 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
936 memory, vcpus, nics, disk_template, disks,
937 bep, hvp, hypervisor_name, tags):
938 """Builds instance related env variables for hooks
940 This builds the hook environment from individual variables.
943 @param name: the name of the instance
944 @type primary_node: string
945 @param primary_node: the name of the instance's primary node
946 @type secondary_nodes: list
947 @param secondary_nodes: list of secondary nodes as strings
948 @type os_type: string
949 @param os_type: the name of the instance's OS
950 @type status: boolean
951 @param status: the should_run status of the instance
953 @param memory: the memory size of the instance
955 @param vcpus: the count of VCPUs the instance has
957 @param nics: list of tuples (ip, mac, mode, link) representing
958 the NICs the instance has
959 @type disk_template: string
960 @param disk_template: the disk template of the instance
962 @param disks: the list of (size, mode) pairs
964 @param bep: the backend parameters for the instance
966 @param hvp: the hypervisor parameters for the instance
967 @type hypervisor_name: string
968 @param hypervisor_name: the hypervisor for the instance
970 @param tags: list of instance tags as strings
972 @return: the hook environment for this instance
981 "INSTANCE_NAME": name,
982 "INSTANCE_PRIMARY": primary_node,
983 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
984 "INSTANCE_OS_TYPE": os_type,
985 "INSTANCE_STATUS": str_status,
986 "INSTANCE_MEMORY": memory,
987 "INSTANCE_VCPUS": vcpus,
988 "INSTANCE_DISK_TEMPLATE": disk_template,
989 "INSTANCE_HYPERVISOR": hypervisor_name,
993 nic_count = len(nics)
994 for idx, (ip, mac, mode, link) in enumerate(nics):
997 env["INSTANCE_NIC%d_IP" % idx] = ip
998 env["INSTANCE_NIC%d_MAC" % idx] = mac
999 env["INSTANCE_NIC%d_MODE" % idx] = mode
1000 env["INSTANCE_NIC%d_LINK" % idx] = link
1001 if mode == constants.NIC_MODE_BRIDGED:
1002 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
1006 env["INSTANCE_NIC_COUNT"] = nic_count
1009 disk_count = len(disks)
1010 for idx, (size, mode) in enumerate(disks):
1011 env["INSTANCE_DISK%d_SIZE" % idx] = size
1012 env["INSTANCE_DISK%d_MODE" % idx] = mode
1016 env["INSTANCE_DISK_COUNT"] = disk_count
1021 env["INSTANCE_TAGS"] = " ".join(tags)
1023 for source, kind in [(bep, "BE"), (hvp, "HV")]:
1024 for key, value in source.items():
1025 env["INSTANCE_%s_%s" % (kind, key)] = value
1030 def _NICListToTuple(lu, nics):
1031 """Build a list of nic information tuples.
1033 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
1034 value in LUInstanceQueryData.
1036 @type lu: L{LogicalUnit}
1037 @param lu: the logical unit on whose behalf we execute
1038 @type nics: list of L{objects.NIC}
1039 @param nics: list of nics to convert to hooks tuples
1043 cluster = lu.cfg.GetClusterInfo()
1047 filled_params = cluster.SimpleFillNIC(nic.nicparams)
1048 mode = filled_params[constants.NIC_MODE]
1049 link = filled_params[constants.NIC_LINK]
1050 hooks_nics.append((ip, mac, mode, link))
1054 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
1055 """Builds instance related env variables for hooks from an object.
1057 @type lu: L{LogicalUnit}
1058 @param lu: the logical unit on whose behalf we execute
1059 @type instance: L{objects.Instance}
1060 @param instance: the instance for which we should build the
1062 @type override: dict
1063 @param override: dictionary with key/values that will override
1066 @return: the hook environment dictionary
1069 cluster = lu.cfg.GetClusterInfo()
1070 bep = cluster.FillBE(instance)
1071 hvp = cluster.FillHV(instance)
1073 "name": instance.name,
1074 "primary_node": instance.primary_node,
1075 "secondary_nodes": instance.secondary_nodes,
1076 "os_type": instance.os,
1077 "status": instance.admin_up,
1078 "memory": bep[constants.BE_MEMORY],
1079 "vcpus": bep[constants.BE_VCPUS],
1080 "nics": _NICListToTuple(lu, instance.nics),
1081 "disk_template": instance.disk_template,
1082 "disks": [(disk.size, disk.mode) for disk in instance.disks],
1085 "hypervisor_name": instance.hypervisor,
1086 "tags": instance.tags,
1089 args.update(override)
1090 return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
1093 def _AdjustCandidatePool(lu, exceptions):
1094 """Adjust the candidate pool after node operations.
1097 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1099 lu.LogInfo("Promoted nodes to master candidate role: %s",
1100 utils.CommaJoin(node.name for node in mod_list))
1101 for name in mod_list:
1102 lu.context.ReaddNode(name)
1103 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1105 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1109 def _DecideSelfPromotion(lu, exceptions=None):
1110 """Decide whether I should promote myself as a master candidate.
1113 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1114 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1115 # the new node will increase mc_max by one, so:
1116 mc_should = min(mc_should + 1, cp_size)
1117 return mc_now < mc_should
1120 def _CheckNicsBridgesExist(lu, target_nics, target_node):
1121 """Check that the brigdes needed by a list of nics exist.
1124 cluster = lu.cfg.GetClusterInfo()
1125 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1126 brlist = [params[constants.NIC_LINK] for params in paramslist
1127 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1129 result = lu.rpc.call_bridges_exist(target_node, brlist)
1130 result.Raise("Error checking bridges on destination node '%s'" %
1131 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1134 def _CheckInstanceBridgesExist(lu, instance, node=None):
1135 """Check that the brigdes needed by an instance exist.
1139 node = instance.primary_node
1140 _CheckNicsBridgesExist(lu, instance.nics, node)
1143 def _CheckOSVariant(os_obj, name):
1144 """Check whether an OS name conforms to the os variants specification.
1146 @type os_obj: L{objects.OS}
1147 @param os_obj: OS object to check
1149 @param name: OS name passed by the user, to check for validity
1152 variant = objects.OS.GetVariant(name)
1153 if not os_obj.supported_variants:
1155 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
1156 " passed)" % (os_obj.name, variant),
1160 raise errors.OpPrereqError("OS name must include a variant",
1163 if variant not in os_obj.supported_variants:
1164 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1167 def _GetNodeInstancesInner(cfg, fn):
1168 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1171 def _GetNodeInstances(cfg, node_name):
1172 """Returns a list of all primary and secondary instances on a node.
1176 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1179 def _GetNodePrimaryInstances(cfg, node_name):
1180 """Returns primary instances on a node.
1183 return _GetNodeInstancesInner(cfg,
1184 lambda inst: node_name == inst.primary_node)
1187 def _GetNodeSecondaryInstances(cfg, node_name):
1188 """Returns secondary instances on a node.
1191 return _GetNodeInstancesInner(cfg,
1192 lambda inst: node_name in inst.secondary_nodes)
1195 def _GetStorageTypeArgs(cfg, storage_type):
1196 """Returns the arguments for a storage type.
1199 # Special case for file storage
1200 if storage_type == constants.ST_FILE:
1201 # storage.FileStorage wants a list of storage directories
1202 return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1207 def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1210 for dev in instance.disks:
1211 cfg.SetDiskID(dev, node_name)
1213 result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1214 result.Raise("Failed to get disk status from node %s" % node_name,
1215 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1217 for idx, bdev_status in enumerate(result.payload):
1218 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1224 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1225 """Check the sanity of iallocator and node arguments and use the
1226 cluster-wide iallocator if appropriate.
1228 Check that at most one of (iallocator, node) is specified. If none is
1229 specified, then the LU's opcode's iallocator slot is filled with the
1230 cluster-wide default iallocator.
1232 @type iallocator_slot: string
1233 @param iallocator_slot: the name of the opcode iallocator slot
1234 @type node_slot: string
1235 @param node_slot: the name of the opcode target node slot
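A typical call from an LU's CheckArguments looks like (illustrative slot
names)::

    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")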
1238 node = getattr(lu.op, node_slot, None)
1239 iallocator = getattr(lu.op, iallocator_slot, None)
1241 if node is not None and iallocator is not None:
1242 raise errors.OpPrereqError("Do not specify both an iallocator and a node",
1244 elif node is None and iallocator is None:
1245 default_iallocator = lu.cfg.GetDefaultIAllocator()
1246 if default_iallocator:
1247 setattr(lu.op, iallocator_slot, default_iallocator)
1249 raise errors.OpPrereqError("No iallocator or node given and no"
1250 " cluster-wide default iallocator found;"
1251 " please specify either an iallocator or a"
1252 " node, or set a cluster-wide default"
1256 def _GetDefaultIAllocator(cfg, iallocator):
1257 """Decides on which iallocator to use.
1259 @type cfg: L{config.ConfigWriter}
1260 @param cfg: Cluster configuration object
1261 @type iallocator: string or None
1262 @param iallocator: Iallocator specified in opcode
1264 @return: Iallocator name
1268 # Use default iallocator
1269 iallocator = cfg.GetDefaultIAllocator()
1272 raise errors.OpPrereqError("No iallocator was specified, neither in the"
1273 " opcode nor as a cluster-wide default",
1279 class LUClusterPostInit(LogicalUnit):
1280 """Logical unit for running hooks after cluster initialization.
1283 HPATH = "cluster-init"
1284 HTYPE = constants.HTYPE_CLUSTER
1286 def BuildHooksEnv(self):
1291 "OP_TARGET": self.cfg.GetClusterName(),
1294 def BuildHooksNodes(self):
1295 """Build hooks nodes.
1298 return ([], [self.cfg.GetMasterNode()])
1300 def Exec(self, feedback_fn):
1307 class LUClusterDestroy(LogicalUnit):
1308 """Logical unit for destroying the cluster.
1311 HPATH = "cluster-destroy"
1312 HTYPE = constants.HTYPE_CLUSTER
1314 def BuildHooksEnv(self):
1319 "OP_TARGET": self.cfg.GetClusterName(),
1322 def BuildHooksNodes(self):
1323 """Build hooks nodes.
1328 def CheckPrereq(self):
1329 """Check prerequisites.
1331 This checks whether the cluster is empty.
1333 Any errors are signaled by raising errors.OpPrereqError.
1336 master = self.cfg.GetMasterNode()
1338 nodelist = self.cfg.GetNodeList()
1339 if len(nodelist) != 1 or nodelist[0] != master:
1340 raise errors.OpPrereqError("There are still %d node(s) in"
1341 " this cluster." % (len(nodelist) - 1),
1343 instancelist = self.cfg.GetInstanceList()
1345 raise errors.OpPrereqError("There are still %d instance(s) in"
1346 " this cluster." % len(instancelist),
1349 def Exec(self, feedback_fn):
1350 """Destroys the cluster.
1353 master = self.cfg.GetMasterNode()
1355 # Run post hooks on master node before it's removed
1356 _RunPostHook(self, master)
1358 result = self.rpc.call_node_deactivate_master_ip(master)
1359 result.Raise("Could not disable the master role")
1364 def _VerifyCertificate(filename):
1365 """Verifies a certificate for L{LUClusterVerifyConfig}.
1367 @type filename: string
1368 @param filename: Path to PEM file
1372 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1373 utils.ReadFile(filename))
1374 except Exception, err: # pylint: disable=W0703
1375 return (LUClusterVerifyConfig.ETYPE_ERROR,
1376 "Failed to load X509 certificate %s: %s" % (filename, err))
1379 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1380 constants.SSL_CERT_EXPIRATION_ERROR)
1383 fnamemsg = "While verifying %s: %s" % (filename, msg)
1388 return (None, fnamemsg)
1389 elif errcode == utils.CERT_WARNING:
1390 return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
1391 elif errcode == utils.CERT_ERROR:
1392 return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)
1394 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1397 def _GetAllHypervisorParameters(cluster, instances):
1398 """Compute the set of all hypervisor parameters.
1400 @type cluster: L{objects.Cluster}
1401 @param cluster: the cluster object
1402 @type instances: list of L{objects.Instance}
1403 @param instances: additional instances from which to obtain parameters
1404 @rtype: list of (origin, hypervisor, parameters)
1405 @return: a list with all parameters found, indicating the hypervisor they
1406 apply to, and the origin (can be "cluster", "os X", or "instance Y")
1411 for hv_name in cluster.enabled_hypervisors:
1412 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
1414 for os_name, os_hvp in cluster.os_hvp.items():
1415 for hv_name, hv_params in os_hvp.items():
1417 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
1418 hvp_data.append(("os %s" % os_name, hv_name, full_params))
1420 # TODO: collapse identical parameter values in a single one
1421 for instance in instances:
1422 if instance.hvparams:
1423 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
1424 cluster.FillHV(instance)))
1429 class _VerifyErrors(object):
1430 """Mix-in for cluster/group verify LUs.
1432 It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
1433 self.op and self._feedback_fn to be available.)
1436 TCLUSTER = "cluster"
1438 TINSTANCE = "instance"
1440 ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1441 ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1442 ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
1443 ECLUSTERDANGLINGNODES = (TNODE, "ECLUSTERDANGLINGNODES")
1444 ECLUSTERDANGLINGINST = (TNODE, "ECLUSTERDANGLINGINST")
1445 EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1446 EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1447 EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1448 EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1449 EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1450 EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1451 EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1452 ENODEDRBD = (TNODE, "ENODEDRBD")
1453 ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1454 ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1455 ENODEHOOKS = (TNODE, "ENODEHOOKS")
1456 ENODEHV = (TNODE, "ENODEHV")
1457 ENODELVM = (TNODE, "ENODELVM")
1458 ENODEN1 = (TNODE, "ENODEN1")
1459 ENODENET = (TNODE, "ENODENET")
1460 ENODEOS = (TNODE, "ENODEOS")
1461 ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1462 ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1463 ENODERPC = (TNODE, "ENODERPC")
1464 ENODESSH = (TNODE, "ENODESSH")
1465 ENODEVERSION = (TNODE, "ENODEVERSION")
1466 ENODESETUP = (TNODE, "ENODESETUP")
1467 ENODETIME = (TNODE, "ENODETIME")
1468 ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1470 ETYPE_FIELD = "code"
1471 ETYPE_ERROR = "ERROR"
1472 ETYPE_WARNING = "WARNING"
1474 def _Error(self, ecode, item, msg, *args, **kwargs):
1475 """Format an error message.
1477 Based on the opcode's error_codes parameter, either format a
1478 parseable error code, or a simpler error string.
1480 This must be called only from Exec and functions called from Exec.
1483 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1485 # first complete the msg
1488 # then format the whole message
1489 if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
1490 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1496 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1497 # and finally report it via the feedback_fn
1498 self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101
1500 def _ErrorIf(self, cond, *args, **kwargs):
1501 """Log an error message if the passed condition is True.
1505 or self.op.debug_simulate_errors) # pylint: disable=E1101
1507 self._Error(*args, **kwargs)
1508 # do not mark the operation as failed for WARN cases only
1509 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1510 self.bad = self.bad or cond
1513 class LUClusterVerify(NoHooksLU):
1514 """Submits all jobs necessary to verify the cluster.
1519 def ExpandNames(self):
1520 self.needed_locks = {}
1522 def Exec(self, feedback_fn):
1525 if self.op.group_name:
1526 groups = [self.op.group_name]
1527 depends_fn = lambda: None
1529 groups = self.cfg.GetNodeGroupList()
1531 # Verify global configuration
1532 jobs.append([opcodes.OpClusterVerifyConfig()])
1534 # Always depend on global verification
1535 depends_fn = lambda: [(-len(jobs), [])]
1537 jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
1538 depends=depends_fn())]
1539 for group in groups)
1541 # Fix up all parameters
1542 for op in itertools.chain(*jobs): # pylint: disable=W0142
1543 op.debug_simulate_errors = self.op.debug_simulate_errors
1544 op.verbose = self.op.verbose
1545 op.error_codes = self.op.error_codes
1547 op.skip_checks = self.op.skip_checks
1548 except AttributeError:
1549 assert not isinstance(op, opcodes.OpClusterVerifyGroup)
1551 return ResultWithJobs(jobs)
1554 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
1555 """Verifies the cluster config.
1560 def _VerifyHVP(self, hvp_data):
1561 """Verifies locally the syntax of the hypervisor parameters.
1564 for item, hv_name, hv_params in hvp_data:
1565 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
1568 hv_class = hypervisor.GetHypervisor(hv_name)
1569 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1570 hv_class.CheckParameterSyntax(hv_params)
1571 except errors.GenericError, err:
1572 self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
1574 def ExpandNames(self):
1575 # Information can be safely retrieved as the BGL is acquired in exclusive mode.
1577 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1578 self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
1579 self.all_node_info = self.cfg.GetAllNodesInfo()
1580 self.all_inst_info = self.cfg.GetAllInstancesInfo()
1581 self.needed_locks = {}
1583 def Exec(self, feedback_fn):
1584 """Verify integrity of cluster, performing various test on nodes.
1588 self._feedback_fn = feedback_fn
1590 feedback_fn("* Verifying cluster config")
1592 for msg in self.cfg.VerifyConfig():
1593 self._ErrorIf(True, self.ECLUSTERCFG, None, msg)
1595 feedback_fn("* Verifying cluster certificate files")
1597 for cert_filename in constants.ALL_CERT_FILES:
1598 (errcode, msg) = _VerifyCertificate(cert_filename)
1599 self._ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1601 feedback_fn("* Verifying hypervisor parameters")
1603 self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
1604 self.all_inst_info.values()))
1606 feedback_fn("* Verifying all nodes belong to an existing group")
1608 # We do this verification here because, should this bogus circumstance
1609 # occur, it would never be caught by VerifyGroup, which only acts on
1610 # nodes/instances reachable from existing node groups.
1612 dangling_nodes = set(node.name for node in self.all_node_info.values()
1613 if node.group not in self.all_group_info)
1615 dangling_instances = {}
1616 no_node_instances = []
1618 for inst in self.all_inst_info.values():
1619 if inst.primary_node in dangling_nodes:
1620 dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
1621 elif inst.primary_node not in self.all_node_info:
1622 no_node_instances.append(inst.name)
1627 utils.CommaJoin(dangling_instances.get(node.name,
1629 for node in dangling_nodes]
1631 self._ErrorIf(bool(dangling_nodes), self.ECLUSTERDANGLINGNODES, None,
1632 "the following nodes (and their instances) belong to a non"
1633 " existing group: %s", utils.CommaJoin(pretty_dangling))
1635 self._ErrorIf(bool(no_node_instances), self.ECLUSTERDANGLINGINST, None,
1636 "the following instances have a non-existing primary-node:"
1637 " %s", utils.CommaJoin(no_node_instances))
1642 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
1643 """Verifies the status of a node group.
1646 HPATH = "cluster-verify"
1647 HTYPE = constants.HTYPE_CLUSTER
1650 _HOOKS_INDENT_RE = re.compile("^", re.M)
1652 class NodeImage(object):
1653 """A class representing the logical and physical status of a node.
1656 @ivar name: the node name to which this object refers
1657 @ivar volumes: a structure as returned from
1658 L{ganeti.backend.GetVolumeList} (runtime)
1659 @ivar instances: a list of running instances (runtime)
1660 @ivar pinst: list of configured primary instances (config)
1661 @ivar sinst: list of configured secondary instances (config)
1662 @ivar sbp: dictionary of {primary-node: list of instances} for all
1663 instances for which this node is secondary (config)
1664 @ivar mfree: free memory, as reported by hypervisor (runtime)
1665 @ivar dfree: free disk, as reported by the node (runtime)
1666 @ivar offline: the offline status (config)
1667 @type rpc_fail: boolean
1668 @ivar rpc_fail: whether the RPC verify call was successful (overall,
1669 not whether the individual keys were correct) (runtime)
1670 @type lvm_fail: boolean
1671 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1672 @type hyp_fail: boolean
1673 @ivar hyp_fail: whether the RPC call didn't return the instance list
1674 @type ghost: boolean
1675 @ivar ghost: whether this is a known node or not (config)
1676 @type os_fail: boolean
1677 @ivar os_fail: whether the RPC call didn't return valid OS data
1679 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1680 @type vm_capable: boolean
1681 @ivar vm_capable: whether the node can host instances
1684 def __init__(self, offline=False, name=None, vm_capable=True):
1693 self.offline = offline
1694 self.vm_capable = vm_capable
1695 self.rpc_fail = False
1696 self.lvm_fail = False
1697 self.hyp_fail = False
1699 self.os_fail = False
1702 def ExpandNames(self):
1703 # This raises errors.OpPrereqError on its own:
1704 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
1706 # Get instances in node group; this is unsafe and needs verification later
1707 inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
1709 self.needed_locks = {
1710 locking.LEVEL_INSTANCE: inst_names,
1711 locking.LEVEL_NODEGROUP: [self.group_uuid],
1712 locking.LEVEL_NODE: [],
1715 self.share_locks = _ShareAll()
1717 def DeclareLocks(self, level):
1718 if level == locking.LEVEL_NODE:
1719 # Get members of node group; this is unsafe and needs verification later
1720 nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
1722 all_inst_info = self.cfg.GetAllInstancesInfo()
1724 # In Exec(), we warn about mirrored instances that have primary and
1725 # secondary living in separate node groups. To fully verify that
1726 # volumes for these instances are healthy, we will need to do an
1727 # extra call to their secondaries. We ensure here those nodes will
1729 for inst in self.owned_locks(locking.LEVEL_INSTANCE):
1730 # Important: access only the instances whose lock is owned
1731 if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
1732 nodes.update(all_inst_info[inst].secondary_nodes)
1734 self.needed_locks[locking.LEVEL_NODE] = nodes
1736 def CheckPrereq(self):
1737 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
1738 self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
1740 group_nodes = set(self.group_info.members)
1741 group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
1744 group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
1746 unlocked_instances = \
1747 group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
1750 raise errors.OpPrereqError("Missing lock for nodes: %s" %
1751 utils.CommaJoin(unlocked_nodes))
1753 if unlocked_instances:
1754 raise errors.OpPrereqError("Missing lock for instances: %s" %
1755 utils.CommaJoin(unlocked_instances))
1757 self.all_node_info = self.cfg.GetAllNodesInfo()
1758 self.all_inst_info = self.cfg.GetAllInstancesInfo()
1760 self.my_node_names = utils.NiceSort(group_nodes)
1761 self.my_inst_names = utils.NiceSort(group_instances)
1763 self.my_node_info = dict((name, self.all_node_info[name])
1764 for name in self.my_node_names)
1766 self.my_inst_info = dict((name, self.all_inst_info[name])
1767 for name in self.my_inst_names)
1769 # We detect here the nodes that will need the extra RPC calls for verifying
1770 # split LV volumes; they should be locked.
1771 extra_lv_nodes = set()
1773 for inst in self.my_inst_info.values():
1774 if inst.disk_template in constants.DTS_INT_MIRROR:
1775 group = self.my_node_info[inst.primary_node].group
1776 for nname in inst.secondary_nodes:
1777 if self.all_node_info[nname].group != group:
1778 extra_lv_nodes.add(nname)
1780 unlocked_lv_nodes = \
1781 extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
1783 if unlocked_lv_nodes:
1784 raise errors.OpPrereqError("these nodes could be locked: %s" %
1785 utils.CommaJoin(unlocked_lv_nodes))
1786 self.extra_lv_nodes = list(extra_lv_nodes)
1788 def _VerifyNode(self, ninfo, nresult):
1789 """Perform some basic validation on data returned from a node.
1791 - check the result data structure is well formed and has all the
1793 - check ganeti version
1795 @type ninfo: L{objects.Node}
1796 @param ninfo: the node to check
1797 @param nresult: the results from the node
1799 @return: whether overall this call was successful (and we can expect
1800 reasonable values in the response)
1804 _ErrorIf = self._ErrorIf # pylint: disable=C0103
1806 # main result, nresult should be a non-empty dict
1807 test = not nresult or not isinstance(nresult, dict)
1808 _ErrorIf(test, self.ENODERPC, node,
1809 "unable to verify node: no data returned")
1813 # compares ganeti version
1814 local_version = constants.PROTOCOL_VERSION
1815 remote_version = nresult.get("version", None)
1816 test = not (remote_version and
1817 isinstance(remote_version, (list, tuple)) and
1818 len(remote_version) == 2)
1819 _ErrorIf(test, self.ENODERPC, node,
1820 "connection to node returned invalid data")
1824 test = local_version != remote_version[0]
1825 _ErrorIf(test, self.ENODEVERSION, node,
1826 "incompatible protocol versions: master %s,"
1827 " node %s", local_version, remote_version[0])
1831 # node seems compatible, we can actually try to look into its results
1833 # full package version
1834 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1835 self.ENODEVERSION, node,
1836 "software version mismatch: master %s, node %s",
1837 constants.RELEASE_VERSION, remote_version[1],
1838 code=self.ETYPE_WARNING)
1840 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1841 if ninfo.vm_capable and isinstance(hyp_result, dict):
1842 for hv_name, hv_result in hyp_result.iteritems():
1843 test = hv_result is not None
1844 _ErrorIf(test, self.ENODEHV, node,
1845 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1847 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1848 if ninfo.vm_capable and isinstance(hvp_result, list):
1849 for item, hv_name, hv_result in hvp_result:
1850 _ErrorIf(True, self.ENODEHV, node,
1851 "hypervisor %s parameter verify failure (source %s): %s",
1852 hv_name, item, hv_result)
1854 test = nresult.get(constants.NV_NODESETUP,
1855 ["Missing NODESETUP results"])
1856 _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1861 def _VerifyNodeTime(self, ninfo, nresult,
1862 nvinfo_starttime, nvinfo_endtime):
1863 """Check the node time.
1865 @type ninfo: L{objects.Node}
1866 @param ninfo: the node to check
1867 @param nresult: the remote results for the node
1868 @param nvinfo_starttime: the start time of the RPC call
1869 @param nvinfo_endtime: the end time of the RPC call
1873 _ErrorIf = self._ErrorIf # pylint: disable=C0103
1875 ntime = nresult.get(constants.NV_TIME, None)
1877 ntime_merged = utils.MergeTime(ntime)
1878 except (ValueError, TypeError):
1879 _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1882 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1883 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1884 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1885 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1889 _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1890 "Node time diverges by at least %s from master node time",
1893 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1894 """Check the node LVM results.
1896 @type ninfo: L{objects.Node}
1897 @param ninfo: the node to check
1898 @param nresult: the remote results for the node
1899 @param vg_name: the configured VG name
1906 _ErrorIf = self._ErrorIf # pylint: disable=C0103
1908 # checks vg existence and size > 20G
1909 vglist = nresult.get(constants.NV_VGLIST, None)
1911 _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1913 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1914 constants.MIN_VG_SIZE)
1915 _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1918 pvlist = nresult.get(constants.NV_PVLIST, None)
1919 test = pvlist is None
1920 _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1922 # check that ':' is not present in PV names, since it's a
1923 # special character for lvcreate (denotes the range of PEs to allocate on a PV)
1925 for _, pvname, owner_vg in pvlist:
1926 test = ":" in pvname
1927 _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1928 " '%s' of VG '%s'", pvname, owner_vg)
1930 def _VerifyNodeBridges(self, ninfo, nresult, bridges):
1931 """Check the node bridges.
1933 @type ninfo: L{objects.Node}
1934 @param ninfo: the node to check
1935 @param nresult: the remote results for the node
1936 @param bridges: the expected list of bridges
1943 _ErrorIf = self._ErrorIf # pylint: disable=C0103
1945 missing = nresult.get(constants.NV_BRIDGES, None)
1946 test = not isinstance(missing, list)
1947 _ErrorIf(test, self.ENODENET, node,
1948 "did not return valid bridge information")
1950 _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
1951 utils.CommaJoin(sorted(missing)))
1953 def _VerifyNodeNetwork(self, ninfo, nresult):
1954 """Check the node network connectivity results.
1956 @type ninfo: L{objects.Node}
1957 @param ninfo: the node to check
1958 @param nresult: the remote results for the node
1962 _ErrorIf = self._ErrorIf # pylint: disable=C0103
1964 test = constants.NV_NODELIST not in nresult
1965 _ErrorIf(test, self.ENODESSH, node,
1966 "node hasn't returned node ssh connectivity data")
1968 if nresult[constants.NV_NODELIST]:
1969 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1970 _ErrorIf(True, self.ENODESSH, node,
1971 "ssh communication with node '%s': %s", a_node, a_msg)
1973 test = constants.NV_NODENETTEST not in nresult
1974 _ErrorIf(test, self.ENODENET, node,
1975 "node hasn't returned node tcp connectivity data")
1977 if nresult[constants.NV_NODENETTEST]:
1978 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1980 _ErrorIf(True, self.ENODENET, node,
1981 "tcp communication with node '%s': %s",
1982 anode, nresult[constants.NV_NODENETTEST][anode])
1984 test = constants.NV_MASTERIP not in nresult
1985 _ErrorIf(test, self.ENODENET, node,
1986 "node hasn't returned node master IP reachability data")
1988 if not nresult[constants.NV_MASTERIP]:
1989 if node == self.master_node:
1990 msg = "the master node cannot reach the master IP (not configured?)"
1992 msg = "cannot reach the master IP"
1993 _ErrorIf(True, self.ENODENET, node, msg)
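  # Illustrative sketch (added for clarity, not part of the original module):
  # the SSH and TCP results are dicts that are empty on success and carry a
  # per-target error message on failure; hypothetical values:
  #
  #   nresult[constants.NV_NODELIST] = {"node2": "ssh: connect timed out"}
  #   nresult[constants.NV_NODENETTEST] = {}  # all tcp checks passed
  #   nresult[constants.NV_MASTERIP] = True   # master IP reachable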
1995 def _VerifyInstance(self, instance, instanceconfig, node_image,
1997 """Verify an instance.
1999 This function checks to see if the required block devices are
2000 available on the instance's node.
2003 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2004 node_current = instanceconfig.primary_node
2006 node_vol_should = {}
2007 instanceconfig.MapLVsByNode(node_vol_should)
2009 for node in node_vol_should:
2010 n_img = node_image[node]
2011 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2012 # ignore missing volumes on offline or broken nodes
2014 for volume in node_vol_should[node]:
2015 test = volume not in n_img.volumes
2016 _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
2017 "volume %s missing on node %s", volume, node)
2019 if instanceconfig.admin_up:
2020 pri_img = node_image[node_current]
2021 test = instance not in pri_img.instances and not pri_img.offline
2022 _ErrorIf(test, self.EINSTANCEDOWN, instance,
2023 "instance not running on its primary node %s",
2026 diskdata = [(nname, success, status, idx)
2027 for (nname, disks) in diskstatus.items()
2028 for idx, (success, status) in enumerate(disks)]
2030 for nname, success, bdev_status, idx in diskdata:
2031 # the 'ghost node' construction in Exec() ensures that we have a
2033 snode = node_image[nname]
2034 bad_snode = snode.ghost or snode.offline
2035 _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
2036 self.EINSTANCEFAULTYDISK, instance,
2037 "couldn't retrieve status for disk/%s on %s: %s",
2038 idx, nname, bdev_status)
2039 _ErrorIf((instanceconfig.admin_up and success and
2040 bdev_status.ldisk_status == constants.LDS_FAULTY),
2041 self.EINSTANCEFAULTYDISK, instance,
2042 "disk/%s on %s is faulty", idx, nname)
2044 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2045 """Verify if there are any unknown volumes in the cluster.
2047 The .os, .swap and backup volumes are ignored. All other volumes are
2048 reported as unknown.
2050 @type reserved: L{ganeti.utils.FieldSet}
2051 @param reserved: a FieldSet of reserved volume names
2054 for node, n_img in node_image.items():
2055 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2056 # skip non-healthy nodes
2058 for volume in n_img.volumes:
2059 test = ((node not in node_vol_should or
2060 volume not in node_vol_should[node]) and
2061 not reserved.Matches(volume))
2062 self._ErrorIf(test, self.ENODEORPHANLV, node,
2063 "volume %s is unknown", volume)
2065 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2066 """Verify N+1 Memory Resilience.
2068 Check that if one single node dies we can still start all the
2069 instances it was primary for.
2072 cluster_info = self.cfg.GetClusterInfo()
2073 for node, n_img in node_image.items():
2074 # This code checks that every node which is now listed as
2075 # secondary has enough memory to host all instances it is
2076 # supposed to host, should a single other node in the cluster fail.
2077 # FIXME: not ready for failover to an arbitrary node
2078 # FIXME: does not support file-backed instances
2079 # WARNING: we currently take into account down instances as well
2080 # as up ones, considering that even if they're down someone
2081 # might want to start them even in the event of a node failure.
2083 # we're skipping offline nodes from the N+1 warning, since
2084 # most likely we don't have good memory information from them;
2085 # we already list instances living on such nodes, and that's
2088 for prinode, instances in n_img.sbp.items():
2090 for instance in instances:
2091 bep = cluster_info.FillBE(instance_cfg[instance])
2092 if bep[constants.BE_AUTO_BALANCE]:
2093 needed_mem += bep[constants.BE_MEMORY]
2094 test = n_img.mfree < needed_mem
2095 self._ErrorIf(test, self.ENODEN1, node,
2096 "not enough memory to accommodate instance failovers"
2097 " should node %s fail (%dMiB needed, %dMiB available)",
2098 prinode, needed_mem, n_img.mfree)
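  # Illustrative sketch (added for clarity, not part of the original module):
  # for every primary node listed in n_img.sbp, the BE_MEMORY of its
  # auto-balanced instances is summed and compared against this node's free
  # memory; hypothetical numbers:
  #
  #   n_img.sbp = {"node1": ["inst1", "inst2"]}
  #   # BE_MEMORY: inst1 = 1024, inst2 = 2048  ->  needed_mem = 3072
  #   # n_img.mfree = 2048  ->  ENODEN1 reported for this node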
2101 def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2102 (files_all, files_all_opt, files_mc, files_vm)):
2103 """Verifies file checksums collected from all nodes.
2105 @param errorif: Callback for reporting errors
2106 @param nodeinfo: List of L{objects.Node} objects
2107 @param master_node: Name of master node
2108 @param all_nvinfo: RPC results
2111 node_names = frozenset(node.name for node in nodeinfo if not node.offline)
2113 assert master_node in node_names
2114 assert (len(files_all | files_all_opt | files_mc | files_vm) ==
2115 sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
2116 "Found file listed in more than one file list"
2118 # Define functions determining which nodes to consider for a file
2119 file2nodefn = dict([(filename, fn)
2120 for (files, fn) in [(files_all, None),
2121 (files_all_opt, None),
2122 (files_mc, lambda node: (node.master_candidate or
2123 node.name == master_node)),
2124 (files_vm, lambda node: node.vm_capable)]
2125 for filename in files])
2127 fileinfo = dict((filename, {}) for filename in file2nodefn.keys())
2129 for node in nodeinfo:
2133 nresult = all_nvinfo[node.name]
2135 if nresult.fail_msg or not nresult.payload:
2138 node_files = nresult.payload.get(constants.NV_FILELIST, None)
2140 test = not (node_files and isinstance(node_files, dict))
2141 errorif(test, cls.ENODEFILECHECK, node.name,
2142 "Node did not return file checksum data")
2146 for (filename, checksum) in node_files.items():
2147 # Check if the file should be considered for a node
2148 fn = file2nodefn[filename]
2149 if fn is None or fn(node):
2150 fileinfo[filename].setdefault(checksum, set()).add(node.name)
2152 for (filename, checksums) in fileinfo.items():
2153 assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2155 # Nodes having the file
2156 with_file = frozenset(node_name
2157 for nodes in fileinfo[filename].values()
2158 for node_name in nodes)
2160 # Nodes missing file
2161 missing_file = node_names - with_file
2163 if filename in files_all_opt:
2165 errorif(missing_file and missing_file != node_names,
2166 cls.ECLUSTERFILECHECK, None,
2167 "File %s is optional, but it must exist on all or no"
2168 " nodes (not found on %s)",
2169 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2171 errorif(missing_file, cls.ECLUSTERFILECHECK, None,
2172 "File %s is missing from node(s) %s", filename,
2173 utils.CommaJoin(utils.NiceSort(missing_file)))
2175 # See if there are multiple versions of the file
2176 test = len(checksums) > 1
2178 variants = ["variant %s on %s" %
2179 (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2180 for (idx, (checksum, nodes)) in
2181 enumerate(sorted(checksums.items()))]
2185 errorif(test, cls.ECLUSTERFILECHECK, None,
2186 "File %s found with %s different checksums (%s)",
2187 filename, len(checksums), "; ".join(variants))
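  # Illustrative sketch (added for clarity, not part of the original module):
  # fileinfo maps each filename to a dict of checksum -> set of node names,
  # so a consistent file ends up with a single checksum key; hypothetical
  # values:
  #
  #   fileinfo["/etc/hosts"] = {
  #     "0123abcd...": set(["node1", "node2"]),
  #     "fedc4321...": set(["node3"]),  # second key -> "2 different checksums"
  #   }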
2189 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2191 """Verifies the node DRBD status.
2193 @type ninfo: L{objects.Node}
2194 @param ninfo: the node to check
2195 @param nresult: the remote results for the node
2196 @param instanceinfo: the dict of instances
2197 @param drbd_helper: the configured DRBD usermode helper
2198 @param drbd_map: the DRBD map as returned by
2199 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2203 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2206 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2207 test = (helper_result is None)
2208 _ErrorIf(test, self.ENODEDRBDHELPER, node,
2209 "no drbd usermode helper returned")
2211 status, payload = helper_result
2213 _ErrorIf(test, self.ENODEDRBDHELPER, node,
2214 "drbd usermode helper check unsuccessful: %s", payload)
2215 test = status and (payload != drbd_helper)
2216 _ErrorIf(test, self.ENODEDRBDHELPER, node,
2217 "wrong drbd usermode helper: %s", payload)
2219 # compute the DRBD minors
2221 for minor, instance in drbd_map[node].items():
2222 test = instance not in instanceinfo
2223 _ErrorIf(test, self.ECLUSTERCFG, None,
2224 "ghost instance '%s' in temporary DRBD map", instance)
2225 # ghost instance should not be running, but otherwise we
2226 # don't give double warnings (both ghost instance and
2227 # unallocated minor in use)
2229 node_drbd[minor] = (instance, False)
2231 instance = instanceinfo[instance]
2232 node_drbd[minor] = (instance.name, instance.admin_up)
2234 # and now check them
2235 used_minors = nresult.get(constants.NV_DRBDLIST, [])
2236 test = not isinstance(used_minors, (tuple, list))
2237 _ErrorIf(test, self.ENODEDRBD, node,
2238 "cannot parse drbd status file: %s", str(used_minors))
2240 # we cannot check drbd status
2243 for minor, (iname, must_exist) in node_drbd.items():
2244 test = minor not in used_minors and must_exist
2245 _ErrorIf(test, self.ENODEDRBD, node,
2246 "drbd minor %d of instance %s is not active", minor, iname)
2247 for minor in used_minors:
2248 test = minor not in node_drbd
2249 _ErrorIf(test, self.ENODEDRBD, node,
2250 "unallocated drbd minor %d is in use", minor)
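  # Illustrative sketch (added for clarity, not part of the original module):
  # drbd_map[node] maps DRBD minors to instance names and node_drbd is derived
  # from it as {minor: (instance_name, must_be_active)}; hypothetical values:
  #
  #   drbd_map[node] = {0: "inst1", 1: "inst2"}
  #   node_drbd = {0: ("inst1", True), 1: ("inst2", False)}
  #   used_minors = [1, 2]
  #   # -> "drbd minor 0 of instance inst1 is not active"
  #   # -> "unallocated drbd minor 2 is in use"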
2252 def _UpdateNodeOS(self, ninfo, nresult, nimg):
2253 """Builds the node OS structures.
2255 @type ninfo: L{objects.Node}
2256 @param ninfo: the node to check
2257 @param nresult: the remote results for the node
2258 @param nimg: the node image object
2262 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2264 remote_os = nresult.get(constants.NV_OSLIST, None)
2265 test = (not isinstance(remote_os, list) or
2266 not compat.all(isinstance(v, list) and len(v) == 7
2267 for v in remote_os))
2269 _ErrorIf(test, self.ENODEOS, node,
2270 "node hasn't returned valid OS data")
2279 for (name, os_path, status, diagnose,
2280 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2282 if name not in os_dict:
2285 # parameters is a list of lists instead of list of tuples due to
2286 # JSON lacking a real tuple type, fix it:
2287 parameters = [tuple(v) for v in parameters]
2288 os_dict[name].append((os_path, status, diagnose,
2289 set(variants), set(parameters), set(api_ver)))
2291 nimg.oslist = os_dict
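  # Illustrative sketch (added for clarity, not part of the original module):
  # nimg.oslist maps each OS name to a list of tuples built from NV_OSLIST;
  # a hypothetical entry:
  #
  #   nimg.oslist["debootstrap"] = [
  #     ("/srv/ganeti/os/debootstrap",  # path
  #      True,                          # status
  #      "",                            # diagnose message
  #      set(["default"]),              # variants
  #      set([("ARG", "doc")]),         # parameters
  #      set([20])),                    # API versions
  #   ]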
2293 def _VerifyNodeOS(self, ninfo, nimg, base):
2294 """Verifies the node OS list.
2296 @type ninfo: L{objects.Node}
2297 @param ninfo: the node to check
2298 @param nimg: the node image object
2299 @param base: the 'template' node we match against (e.g. from the master)
2303 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2305 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2307 beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2308 for os_name, os_data in nimg.oslist.items():
2309 assert os_data, "Empty OS status for OS %s?!" % os_name
2310 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2311 _ErrorIf(not f_status, self.ENODEOS, node,
2312 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2313 _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
2314 "OS '%s' has multiple entries (first one shadows the rest): %s",
2315 os_name, utils.CommaJoin([v[0] for v in os_data]))
2316 # comparisons with the 'base' image
2317 test = os_name not in base.oslist
2318 _ErrorIf(test, self.ENODEOS, node,
2319 "Extra OS %s not present on reference node (%s)",
2323 assert base.oslist[os_name], "Base node has empty OS status?"
2324 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2326 # base OS is invalid, skipping
2328 for kind, a, b in [("API version", f_api, b_api),
2329 ("variants list", f_var, b_var),
2330 ("parameters", beautify_params(f_param),
2331 beautify_params(b_param))]:
2332 _ErrorIf(a != b, self.ENODEOS, node,
2333 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2334 kind, os_name, base.name,
2335 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2337 # check any missing OSes
2338 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2339 _ErrorIf(missing, self.ENODEOS, node,
2340 "OSes present on reference node %s but missing on this node: %s",
2341 base.name, utils.CommaJoin(missing))
2343 def _VerifyOob(self, ninfo, nresult):
2344 """Verifies out of band functionality of a node.
2346 @type ninfo: L{objects.Node}
2347 @param ninfo: the node to check
2348 @param nresult: the remote results for the node
2352 # We just have to verify the paths on master and/or master candidates
2353 # as the oob helper is invoked on the master
2354 if ((ninfo.master_candidate or ninfo.master_capable) and
2355 constants.NV_OOB_PATHS in nresult):
2356 for path_result in nresult[constants.NV_OOB_PATHS]:
2357 self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
2359 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2360 """Verifies and updates the node volume data.
2362 This function will update a L{NodeImage}'s internal structures
2363 with data from the remote call.
2365 @type ninfo: L{objects.Node}
2366 @param ninfo: the node to check
2367 @param nresult: the remote results for the node
2368 @param nimg: the node image object
2369 @param vg_name: the configured VG name
2373 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2375 nimg.lvm_fail = True
2376 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2379 elif isinstance(lvdata, basestring):
2380 _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
2381 utils.SafeEncode(lvdata))
2382 elif not isinstance(lvdata, dict):
2383 _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
2385 nimg.volumes = lvdata
2386 nimg.lvm_fail = False
2388 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2389 """Verifies and updates the node instance list.
2391 If the listing was successful, then updates this node's instance
2392 list. Otherwise, it marks the RPC call as failed for the instance list.
2395 @type ninfo: L{objects.Node}
2396 @param ninfo: the node to check
2397 @param nresult: the remote results for the node
2398 @param nimg: the node image object
2401 idata = nresult.get(constants.NV_INSTANCELIST, None)
2402 test = not isinstance(idata, list)
2403 self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
2404 " (instancelist): %s", utils.SafeEncode(str(idata)))
2406 nimg.hyp_fail = True
2408 nimg.instances = idata
2410 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2411 """Verifies and computes a node information map
2413 @type ninfo: L{objects.Node}
2414 @param ninfo: the node to check
2415 @param nresult: the remote results for the node
2416 @param nimg: the node image object
2417 @param vg_name: the configured VG name
2421 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2423 # try to read free memory (from the hypervisor)
2424 hv_info = nresult.get(constants.NV_HVINFO, None)
2425 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2426 _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
2429 nimg.mfree = int(hv_info["memory_free"])
2430 except (ValueError, TypeError):
2431 _ErrorIf(True, self.ENODERPC, node,
2432 "node returned invalid nodeinfo, check hypervisor")
2434 # FIXME: devise a free space model for file based instances as well
2435 if vg_name is not None:
2436 test = (constants.NV_VGLIST not in nresult or
2437 vg_name not in nresult[constants.NV_VGLIST])
2438 _ErrorIf(test, self.ENODELVM, node,
2439 "node didn't return data for the volume group '%s'"
2440 " - it is either missing or broken", vg_name)
2443 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2444 except (ValueError, TypeError):
2445 _ErrorIf(True, self.ENODERPC, node,
2446 "node returned invalid LVM info, check LVM status")
2448 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2449 """Gets per-disk status information for all instances.
2451 @type nodelist: list of strings
2452 @param nodelist: Node names
2453 @type node_image: dict of (name, L{objects.Node})
2454 @param node_image: Node objects
2455 @type instanceinfo: dict of (name, L{objects.Instance})
2456 @param instanceinfo: Instance objects
2457 @rtype: {instance: {node: [(success, payload)]}}
2458 @return: a dictionary of per-instance dictionaries with nodes as
2459 keys and disk information as values; the disk information is a
2460 list of tuples (success, payload)
2463 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2466 node_disks_devonly = {}
2467 diskless_instances = set()
2468 diskless = constants.DT_DISKLESS
2470 for nname in nodelist:
2471 node_instances = list(itertools.chain(node_image[nname].pinst,
2472 node_image[nname].sinst))
2473 diskless_instances.update(inst for inst in node_instances
2474 if instanceinfo[inst].disk_template == diskless)
2475 disks = [(inst, disk)
2476 for inst in node_instances
2477 for disk in instanceinfo[inst].disks]
2480 # No need to collect data
2483 node_disks[nname] = disks
2485 # Creating copies as SetDiskID below will modify the objects and that can
2486 # lead to incorrect data returned from nodes
2487 devonly = [dev.Copy() for (_, dev) in disks]
2490 self.cfg.SetDiskID(dev, nname)
2492 node_disks_devonly[nname] = devonly
2494 assert len(node_disks) == len(node_disks_devonly)
2496 # Collect data from all nodes with disks
2497 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2500 assert len(result) == len(node_disks)
2504 for (nname, nres) in result.items():
2505 disks = node_disks[nname]
2508 # No data from this node
2509 data = len(disks) * [(False, "node offline")]
2512 _ErrorIf(msg, self.ENODERPC, nname,
2513 "while getting disk information: %s", msg)
2515 # No data from this node
2516 data = len(disks) * [(False, msg)]
2519 for idx, i in enumerate(nres.payload):
2520 if isinstance(i, (tuple, list)) and len(i) == 2:
2523 logging.warning("Invalid result from node %s, entry %d: %s",
2525 data.append((False, "Invalid result from the remote node"))
2527 for ((inst, _), status) in zip(disks, data):
2528 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2530 # Add empty entries for diskless instances.
2531 for inst in diskless_instances:
2532 assert inst not in instdisk
2535 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2536 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2537 compat.all(isinstance(s, (tuple, list)) and
2538 len(s) == 2 for s in statuses)
2539 for inst, nnames in instdisk.items()
2540 for nname, statuses in nnames.items())
2541 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
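  # Illustrative sketch (added for clarity, not part of the original module):
  # the resulting instdisk structure nests instance -> node -> per-disk
  # (success, payload) tuples; hypothetical values:
  #
  #   instdisk = {
  #     "inst1": {
  #       "node1": [(True, status_disk0), (True, status_disk1)],
  #       "node2": [(False, "node offline"), (False, "node offline")],
  #     },
  #   }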
2545 def BuildHooksEnv(self):
2548 Cluster-Verify hooks run only in the post phase; when they fail, their
2549 output is logged in the verify output and the verification fails.
2553 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2556 env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
2557 for node in self.my_node_info.values())
2561 def BuildHooksNodes(self):
2562 """Build hooks nodes.
2565 return ([], self.my_node_names)
2567 def Exec(self, feedback_fn):
2568 """Verify integrity of the node group, performing various tests on nodes.
2571 # This method has too many local variables. pylint: disable=R0914
2572 feedback_fn("* Verifying group '%s'" % self.group_info.name)
2574 if not self.my_node_names:
2576 feedback_fn("* Empty node group, skipping verification")
2580 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2581 verbose = self.op.verbose
2582 self._feedback_fn = feedback_fn
2584 vg_name = self.cfg.GetVGName()
2585 drbd_helper = self.cfg.GetDRBDHelper()
2586 cluster = self.cfg.GetClusterInfo()
2587 groupinfo = self.cfg.GetAllNodeGroupsInfo()
2588 hypervisors = cluster.enabled_hypervisors
2589 node_data_list = [self.my_node_info[name] for name in self.my_node_names]
2591 i_non_redundant = [] # Non redundant instances
2592 i_non_a_balanced = [] # Non auto-balanced instances
2593 n_offline = 0 # Count of offline nodes
2594 n_drained = 0 # Count of nodes being drained
2595 node_vol_should = {}
2597 # FIXME: verify OS list
2600 filemap = _ComputeAncillaryFiles(cluster, False)
2602 # do local checksums
2603 master_node = self.master_node = self.cfg.GetMasterNode()
2604 master_ip = self.cfg.GetMasterIP()
2606 feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
2608 # We will make nodes contact all nodes in their group, and one node from
2609 # every other group.
2610 # TODO: should it be a *random* node, different every time?
2611 online_nodes = [node.name for node in node_data_list if not node.offline]
2612 other_group_nodes = {}
2614 for name in sorted(self.all_node_info):
2615 node = self.all_node_info[name]
2616 if (node.group not in other_group_nodes
2617 and node.group != self.group_uuid
2618 and not node.offline):
2619 other_group_nodes[node.group] = node.name
2621 node_verify_param = {
2622 constants.NV_FILELIST:
2623 utils.UniqueSequence(filename
2624 for files in filemap
2625 for filename in files),
2626 constants.NV_NODELIST: online_nodes + other_group_nodes.values(),
2627 constants.NV_HYPERVISOR: hypervisors,
2628 constants.NV_HVPARAMS:
2629 _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
2630 constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
2631 for node in node_data_list
2632 if not node.offline],
2633 constants.NV_INSTANCELIST: hypervisors,
2634 constants.NV_VERSION: None,
2635 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2636 constants.NV_NODESETUP: None,
2637 constants.NV_TIME: None,
2638 constants.NV_MASTERIP: (master_node, master_ip),
2639 constants.NV_OSLIST: None,
2640 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2643 if vg_name is not None:
2644 node_verify_param[constants.NV_VGLIST] = None
2645 node_verify_param[constants.NV_LVLIST] = vg_name
2646 node_verify_param[constants.NV_PVLIST] = [vg_name]
2647 node_verify_param[constants.NV_DRBDLIST] = None
2650 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2653 # FIXME: this needs to be changed per node-group, not cluster-wide
2655 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
2656 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2657 bridges.add(default_nicpp[constants.NIC_LINK])
2658 for instance in self.my_inst_info.values():
2659 for nic in instance.nics:
2660 full_nic = cluster.SimpleFillNIC(nic.nicparams)
2661 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2662 bridges.add(full_nic[constants.NIC_LINK])
2665 node_verify_param[constants.NV_BRIDGES] = list(bridges)
2667 # Build our expected cluster state
2668 node_image = dict((node.name, self.NodeImage(offline=node.offline,
2670 vm_capable=node.vm_capable))
2671 for node in node_data_list)
2675 for node in self.all_node_info.values():
2676 path = _SupportsOob(self.cfg, node)
2677 if path and path not in oob_paths:
2678 oob_paths.append(path)
2681 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2683 for instance in self.my_inst_names:
2684 inst_config = self.my_inst_info[instance]
2686 for nname in inst_config.all_nodes:
2687 if nname not in node_image:
2688 gnode = self.NodeImage(name=nname)
2689 gnode.ghost = (nname not in self.all_node_info)
2690 node_image[nname] = gnode
2692 inst_config.MapLVsByNode(node_vol_should)
2694 pnode = inst_config.primary_node
2695 node_image[pnode].pinst.append(instance)
2697 for snode in inst_config.secondary_nodes:
2698 nimg = node_image[snode]
2699 nimg.sinst.append(instance)
2700 if pnode not in nimg.sbp:
2701 nimg.sbp[pnode] = []
2702 nimg.sbp[pnode].append(instance)
2704 # At this point, we have the in-memory data structures complete,
2705 # except for the runtime information, which we'll gather next
2707 # Due to the way our RPC system works, exact response times cannot be
2708 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2709 # time before and after executing the request, we can at least have a time
2711 nvinfo_starttime = time.time()
2712 all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
2714 self.cfg.GetClusterName())
2715 nvinfo_endtime = time.time()
2717 if self.extra_lv_nodes and vg_name is not None:
2719 self.rpc.call_node_verify(self.extra_lv_nodes,
2720 {constants.NV_LVLIST: vg_name},
2721 self.cfg.GetClusterName())
2723 extra_lv_nvinfo = {}
2725 all_drbd_map = self.cfg.ComputeDRBDMap()
2727 feedback_fn("* Gathering disk information (%s nodes)" %
2728 len(self.my_node_names))
2729 instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
2732 feedback_fn("* Verifying configuration file consistency")
2734 # If not all nodes are being checked, we need to make sure the master node
2735 # and a non-checked vm_capable node are in the list.
2736 absent_nodes = set(self.all_node_info).difference(self.my_node_info)
2738 vf_nvinfo = all_nvinfo.copy()
2739 vf_node_info = list(self.my_node_info.values())
2740 additional_nodes = []
2741 if master_node not in self.my_node_info:
2742 additional_nodes.append(master_node)
2743 vf_node_info.append(self.all_node_info[master_node])
2744 # Add the first vm_capable node we find which is not included
2745 for node in absent_nodes:
2746 nodeinfo = self.all_node_info[node]
2747 if nodeinfo.vm_capable and not nodeinfo.offline:
2748 additional_nodes.append(node)
2749 vf_node_info.append(self.all_node_info[node])
2751 key = constants.NV_FILELIST
2752 vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
2753 {key: node_verify_param[key]},
2754 self.cfg.GetClusterName()))
2756 vf_nvinfo = all_nvinfo
2757 vf_node_info = self.my_node_info.values()
2759 self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
2761 feedback_fn("* Verifying node status")
2765 for node_i in node_data_list:
2767 nimg = node_image[node]
2771 feedback_fn("* Skipping offline node %s" % (node,))
2775 if node == master_node:
2777 elif node_i.master_candidate:
2778 ntype = "master candidate"
2779 elif node_i.drained:
2785 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2787 msg = all_nvinfo[node].fail_msg
2788 _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2790 nimg.rpc_fail = True
2793 nresult = all_nvinfo[node].payload
2795 nimg.call_ok = self._VerifyNode(node_i, nresult)
2796 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2797 self._VerifyNodeNetwork(node_i, nresult)
2798 self._VerifyOob(node_i, nresult)
2801 self._VerifyNodeLVM(node_i, nresult, vg_name)
2802 self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
2805 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2806 self._UpdateNodeInstances(node_i, nresult, nimg)
2807 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2808 self._UpdateNodeOS(node_i, nresult, nimg)
2810 if not nimg.os_fail:
2811 if refos_img is None:
2813 self._VerifyNodeOS(node_i, nimg, refos_img)
2814 self._VerifyNodeBridges(node_i, nresult, bridges)
2816 # Check whether all running instances are primary for the node. (This
2817 # can no longer be done from _VerifyInstance below, since some of the
2818 # wrong instances could be from other node groups.)
2819 non_primary_inst = set(nimg.instances).difference(nimg.pinst)
2821 for inst in non_primary_inst:
2822 test = inst in self.all_inst_info
2823 _ErrorIf(test, self.EINSTANCEWRONGNODE, inst,
2824 "instance should not run on node %s", node_i.name)
2825 _ErrorIf(not test, self.ENODEORPHANINSTANCE, node_i.name,
2826 "node is running unknown instance %s", inst)
2828 for node, result in extra_lv_nvinfo.items():
2829 self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
2830 node_image[node], vg_name)
2832 feedback_fn("* Verifying instance status")
2833 for instance in self.my_inst_names:
2835 feedback_fn("* Verifying instance %s" % instance)
2836 inst_config = self.my_inst_info[instance]
2837 self._VerifyInstance(instance, inst_config, node_image,
2839 inst_nodes_offline = []
2841 pnode = inst_config.primary_node
2842 pnode_img = node_image[pnode]
2843 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2844 self.ENODERPC, pnode, "instance %s, connection to"
2845 " primary node failed", instance)
2847 _ErrorIf(inst_config.admin_up and pnode_img.offline,
2848 self.EINSTANCEBADNODE, instance,
2849 "instance is marked as running and lives on offline node %s",
2850 inst_config.primary_node)
2852 # If the instance is non-redundant we cannot survive losing its primary
2853 # node, so we are not N+1 compliant. On the other hand we have no disk
2854 # templates with more than one secondary so that situation is not well
2856 # FIXME: does not support file-backed instances
2857 if not inst_config.secondary_nodes:
2858 i_non_redundant.append(instance)
2860 _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2861 instance, "instance has multiple secondary nodes: %s",
2862 utils.CommaJoin(inst_config.secondary_nodes),
2863 code=self.ETYPE_WARNING)
2865 if inst_config.disk_template in constants.DTS_INT_MIRROR:
2866 pnode = inst_config.primary_node
2867 instance_nodes = utils.NiceSort(inst_config.all_nodes)
2868 instance_groups = {}
2870 for node in instance_nodes:
2871 instance_groups.setdefault(self.all_node_info[node].group,
2875 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2876 # Sort so that we always list the primary node first.
2877 for group, nodes in sorted(instance_groups.items(),
2878 key=lambda (_, nodes): pnode in nodes,
2881 self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2882 instance, "instance has primary and secondary nodes in"
2883 " different groups: %s", utils.CommaJoin(pretty_list),
2884 code=self.ETYPE_WARNING)
2886 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2887 i_non_a_balanced.append(instance)
2889 for snode in inst_config.secondary_nodes:
2890 s_img = node_image[snode]
2891 _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2892 "instance %s, connection to secondary node failed", instance)
2895 inst_nodes_offline.append(snode)
2897 # warn that the instance lives on offline nodes
2898 _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2899 "instance has offline secondary node(s) %s",
2900 utils.CommaJoin(inst_nodes_offline))
2901 # ... or ghost/non-vm_capable nodes
2902 for node in inst_config.all_nodes:
2903 _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2904 "instance lives on ghost node %s", node)
2905 _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2906 instance, "instance lives on non-vm_capable node %s", node)
2908 feedback_fn("* Verifying orphan volumes")
2909 reserved = utils.FieldSet(*cluster.reserved_lvs)
2911 # We will get spurious "unknown volume" warnings if any node of this group
2912 # is secondary for an instance whose primary is in another group. To avoid
2913 # them, we find these instances and add their volumes to node_vol_should.
2914 for inst in self.all_inst_info.values():
2915 for secondary in inst.secondary_nodes:
2916 if (secondary in self.my_node_info
2917 and inst.name not in self.my_inst_info):
2918 inst.MapLVsByNode(node_vol_should)
2921 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2923 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2924 feedback_fn("* Verifying N+1 Memory redundancy")
2925 self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
2927 feedback_fn("* Other Notes")
2929 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
2930 % len(i_non_redundant))
2932 if i_non_a_balanced:
2933 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
2934 % len(i_non_a_balanced))
2937 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
2940 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
2944 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2945 """Analyze the post-hooks' result
2947 This method analyses the hook result, handles it, and sends some
2948 nicely-formatted feedback back to the user.
2950 @param phase: one of L{constants.HOOKS_PHASE_POST} or
2951 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2952 @param hooks_results: the results of the multi-node hooks rpc call
2953 @param feedback_fn: function used to send feedback back to the caller
2954 @param lu_result: previous Exec result
2955 @return: the new Exec result, based on the previous result
2959 # We only really run POST phase hooks, only for non-empty groups,
2960 # and are only interested in their results
2961 if not self.my_node_names:
2964 elif phase == constants.HOOKS_PHASE_POST:
2965 # Used to change hooks' output to proper indentation
2966 feedback_fn("* Hooks Results")
2967 assert hooks_results, "invalid result from hooks"
2969 for node_name in hooks_results:
2970 res = hooks_results[node_name]
2972 test = msg and not res.offline
2973 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2974 "Communication failure in hooks execution: %s", msg)
2975 if res.offline or msg:
2976 # No need to investigate payload if node is offline or gave an error.
2977 # override manually lu_result here as _ErrorIf only
2978 # overrides self.bad
2981 for script, hkr, output in res.payload:
2982 test = hkr == constants.HKR_FAIL
2983 self._ErrorIf(test, self.ENODEHOOKS, node_name,
2984 "Script %s failed, output:", script)
2986 output = self._HOOKS_INDENT_RE.sub(" ", output)
2987 feedback_fn("%s" % output)
2993 class LUClusterVerifyDisks(NoHooksLU):
2994 """Verifies the cluster disks status.
2999 def ExpandNames(self):
3000 self.share_locks = _ShareAll()
3001 self.needed_locks = {
3002 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3005 def Exec(self, feedback_fn):
3006 group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
3008 # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
3009 return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
3010 for group in group_names])
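# Illustrative sketch (added for clarity, not part of the original module):
# wrapping the per-group opcodes in ResultWithJobs makes the processor submit
# one job per node group; for hypothetical groups "default" and "other" the
# returned value is equivalent to:
#
#   ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name="default")],
#                   [opcodes.OpGroupVerifyDisks(group_name="other")]])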
3013 class LUGroupVerifyDisks(NoHooksLU):
3014 """Verifies the status of all disks in a node group.
3019 def ExpandNames(self):
3020 # Raises errors.OpPrereqError on its own if group can't be found
3021 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
3023 self.share_locks = _ShareAll()
3024 self.needed_locks = {
3025 locking.LEVEL_INSTANCE: [],
3026 locking.LEVEL_NODEGROUP: [],
3027 locking.LEVEL_NODE: [],
3030 def DeclareLocks(self, level):
3031 if level == locking.LEVEL_INSTANCE:
3032 assert not self.needed_locks[locking.LEVEL_INSTANCE]
3034 # Lock instances optimistically, needs verification once node and group
3035 # locks have been acquired
3036 self.needed_locks[locking.LEVEL_INSTANCE] = \
3037 self.cfg.GetNodeGroupInstances(self.group_uuid)
3039 elif level == locking.LEVEL_NODEGROUP:
3040 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3042 self.needed_locks[locking.LEVEL_NODEGROUP] = \
3043 set([self.group_uuid] +
3044 # Lock all groups used by instances optimistically; this requires
3045 # going via the node before it's locked, requiring verification
3048 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3049 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
3051 elif level == locking.LEVEL_NODE:
3052 # This will only lock the nodes in the group to be verified which contain
3054 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3055 self._LockInstancesNodes()
3057 # Lock all nodes in group to be verified
3058 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
3059 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
3060 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3062 def CheckPrereq(self):
3063 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3064 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3065 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3067 assert self.group_uuid in owned_groups
3069 # Check if locked instances are still correct
3070 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
3072 # Get instance information
3073 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
3075 # Check if node groups for locked instances are still correct
3076 for (instance_name, inst) in self.instances.items():
3077 assert owned_nodes.issuperset(inst.all_nodes), \
3078 "Instance %s's nodes changed while we kept the lock" % instance_name
3080 inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
3083 assert self.group_uuid in inst_groups, \
3084 "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
3086 def Exec(self, feedback_fn):
3087 """Verify integrity of cluster disks.
3089 @rtype: tuple of three items
3090 @return: a tuple of (dict of node-to-node_error, list of instances
3091 which need activate-disks, dict of instance: (node, volume) for
3096 res_instances = set()
3099 nv_dict = _MapInstanceDisksToNodes([inst
3100 for inst in self.instances.values()
3104 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
3105 set(self.cfg.GetVmCapableNodeList()))
3107 node_lvs = self.rpc.call_lv_list(nodes, [])
3109 for (node, node_res) in node_lvs.items():
3110 if node_res.offline:
3113 msg = node_res.fail_msg
3115 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
3116 res_nodes[node] = msg
3119 for lv_name, (_, _, lv_online) in node_res.payload.items():
3120 inst = nv_dict.pop((node, lv_name), None)
3121 if not (lv_online or inst is None):
3122 res_instances.add(inst)
3124 # any leftover items in nv_dict are missing LVs, let's arrange the data
3126 for key, inst in nv_dict.iteritems():
3127 res_missing.setdefault(inst, []).append(key)
3129 return (res_nodes, list(res_instances), res_missing)
3132 class LUClusterRepairDiskSizes(NoHooksLU):
3133 """Verifies the cluster disks sizes.
3138 def ExpandNames(self):
3139 if self.op.instances:
3140 self.wanted_names = _GetWantedInstances(self, self.op.instances)
3141 self.needed_locks = {
3142 locking.LEVEL_NODE: [],
3143 locking.LEVEL_INSTANCE: self.wanted_names,
3145 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3147 self.wanted_names = None
3148 self.needed_locks = {
3149 locking.LEVEL_NODE: locking.ALL_SET,
3150 locking.LEVEL_INSTANCE: locking.ALL_SET,
3152 self.share_locks = _ShareAll()
3154 def DeclareLocks(self, level):
3155 if level == locking.LEVEL_NODE and self.wanted_names is not None:
3156 self._LockInstancesNodes(primary_only=True)
3158 def CheckPrereq(self):
3159 """Check prerequisites.
3161 This only checks the optional instance list against the existing names.
3164 if self.wanted_names is None:
3165 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
3167 self.wanted_instances = \
3168 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
3170 def _EnsureChildSizes(self, disk):
3171 """Ensure children of the disk have the needed disk size.
3173 This is valid mainly for DRBD8 and fixes an issue where the
3174 children have smaller disk size.
3176 @param disk: an L{ganeti.objects.Disk} object
3179 if disk.dev_type == constants.LD_DRBD8:
3180 assert disk.children, "Empty children for DRBD8?"
3181 fchild = disk.children[0]
3182 mismatch = fchild.size < disk.size
3184 self.LogInfo("Child disk has size %d, parent %d, fixing",
3185 fchild.size, disk.size)
3186 fchild.size = disk.size
3188 # and we recurse on this child only, not on the metadev
3189 return self._EnsureChildSizes(fchild) or mismatch
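  # Illustrative sketch (added for clarity, not part of the original module):
  # for a DRBD8 disk the first child is the data volume; when it is smaller
  # than the parent it is grown to match and the change is reported back.
  # Hypothetical numbers:
  #
  #   drbd.size = 10240, drbd.children[0].size = 10112
  #   self._EnsureChildSizes(drbd)  # grows the child to 10240, returns True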
3193 def Exec(self, feedback_fn):
3194 """Verify the size of cluster disks.
3197 # TODO: check child disks too
3198 # TODO: check differences in size between primary/secondary nodes
3200 for instance in self.wanted_instances:
3201 pnode = instance.primary_node
3202 if pnode not in per_node_disks:
3203 per_node_disks[pnode] = []
3204 for idx, disk in enumerate(instance.disks):
3205 per_node_disks[pnode].append((instance, idx, disk))
3208 for node, dskl in per_node_disks.items():
3209 newl = [v[2].Copy() for v in dskl]
3211 self.cfg.SetDiskID(dsk, node)
3212 result = self.rpc.call_blockdev_getsize(node, newl)
3214 self.LogWarning("Failure in blockdev_getsize call to node"
3215 " %s, ignoring", node)
3217 if len(result.payload) != len(dskl):
3218 logging.warning("Invalid result from node %s: len(dskl)=%d,"
3219 " result.payload=%s", node, len(dskl), result.payload)
3220 self.LogWarning("Invalid result from node %s, ignoring node results",
3223 for ((instance, idx, disk), size) in zip(dskl, result.payload):
3225 self.LogWarning("Disk %d of instance %s did not return size"
3226 " information, ignoring", idx, instance.name)
3228 if not isinstance(size, (int, long)):
3229 self.LogWarning("Disk %d of instance %s did not return valid"
3230 " size information, ignoring", idx, instance.name)
3233 if size != disk.size:
3234 self.LogInfo("Disk %d of instance %s has mismatched size,"
3235 " correcting: recorded %d, actual %d", idx,
3236 instance.name, disk.size, size)
3238 self.cfg.Update(instance, feedback_fn)
3239 changed.append((instance.name, idx, size))
3240 if self._EnsureChildSizes(disk):
3241 self.cfg.Update(instance, feedback_fn)
3242 changed.append((instance.name, idx, disk.size))
3246 class LUClusterRename(LogicalUnit):
3247 """Rename the cluster.
3250 HPATH = "cluster-rename"
3251 HTYPE = constants.HTYPE_CLUSTER
3253 def BuildHooksEnv(self):
3258 "OP_TARGET": self.cfg.GetClusterName(),
3259 "NEW_NAME": self.op.name,
3262 def BuildHooksNodes(self):
3263 """Build hooks nodes.
3266 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
3268 def CheckPrereq(self):
3269 """Verify that the passed name is a valid one.
3272 hostname = netutils.GetHostname(name=self.op.name,
3273 family=self.cfg.GetPrimaryIPFamily())
3275 new_name = hostname.name
3276 self.ip = new_ip = hostname.ip
3277 old_name = self.cfg.GetClusterName()
3278 old_ip = self.cfg.GetMasterIP()
3279 if new_name == old_name and new_ip == old_ip:
3280 raise errors.OpPrereqError("Neither the name nor the IP address of the"
3281 " cluster has changed",
3283 if new_ip != old_ip:
3284 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
3285 raise errors.OpPrereqError("The given cluster IP address (%s) is"
3286 " reachable on the network" %
3287 new_ip, errors.ECODE_NOTUNIQUE)
3289 self.op.name = new_name
3291 def Exec(self, feedback_fn):
3292 """Rename the cluster.
3295 clustername = self.op.name
3298 # shutdown the master IP
3299 master = self.cfg.GetMasterNode()
3300 result = self.rpc.call_node_deactivate_master_ip(master)
3301 result.Raise("Could not disable the master role")
3304 cluster = self.cfg.GetClusterInfo()
3305 cluster.cluster_name = clustername
3306 cluster.master_ip = ip
3307 self.cfg.Update(cluster, feedback_fn)
3309 # update the known hosts file
3310 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
3311 node_list = self.cfg.GetOnlineNodeList()
3313 node_list.remove(master)
3316 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
3318 result = self.rpc.call_node_activate_master_ip(master)
3319 msg = result.fail_msg
3321 self.LogWarning("Could not re-enable the master role on"
3322 " the master, please restart manually: %s", msg)
3327 class LUClusterSetParams(LogicalUnit):
3328 """Change the parameters of the cluster.
3331 HPATH = "cluster-modify"
3332 HTYPE = constants.HTYPE_CLUSTER
3335 def CheckArguments(self):
3339 if self.op.uid_pool:
3340 uidpool.CheckUidPool(self.op.uid_pool)
3342 if self.op.add_uids:
3343 uidpool.CheckUidPool(self.op.add_uids)
3345 if self.op.remove_uids:
3346 uidpool.CheckUidPool(self.op.remove_uids)
3348 def ExpandNames(self):
3349 # FIXME: in the future maybe other cluster params won't require checking on
3350 # all nodes to be modified.
3351 self.needed_locks = {
3352 locking.LEVEL_NODE: locking.ALL_SET,
3354 self.share_locks[locking.LEVEL_NODE] = 1
3356 def BuildHooksEnv(self):
3361 "OP_TARGET": self.cfg.GetClusterName(),
3362 "NEW_VG_NAME": self.op.vg_name,
3365 def BuildHooksNodes(self):
3366 """Build hooks nodes.
3369 mn = self.cfg.GetMasterNode()
3372 def CheckPrereq(self):
3373 """Check prerequisites.
3375 This checks whether the given parameters don't conflict and
3376 whether the given volume group is valid.
3379 if self.op.vg_name is not None and not self.op.vg_name:
3380 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
3381 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
3382 " instances exist", errors.ECODE_INVAL)
3384 if self.op.drbd_helper is not None and not self.op.drbd_helper:
3385 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
3386 raise errors.OpPrereqError("Cannot disable drbd helper while"
3387 " drbd-based instances exist",
3390 node_list = self.owned_locks(locking.LEVEL_NODE)
3392 # if vg_name not None, checks given volume group on all nodes
3394 vglist = self.rpc.call_vg_list(node_list)
3395 for node in node_list:
3396 msg = vglist[node].fail_msg
3398 # ignoring down node
3399 self.LogWarning("Error while gathering data on node %s"
3400 " (ignoring node): %s", node, msg)
3402 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
3404 constants.MIN_VG_SIZE)
3406 raise errors.OpPrereqError("Error on node '%s': %s" %
3407 (node, vgstatus), errors.ECODE_ENVIRON)
3409 if self.op.drbd_helper:
3410 # checks given drbd helper on all nodes
3411 helpers = self.rpc.call_drbd_helper(node_list)
3412 for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
3414 self.LogInfo("Not checking drbd helper on offline node %s", node)
3416 msg = helpers[node].fail_msg
3418 raise errors.OpPrereqError("Error checking drbd helper on node"
3419 " '%s': %s" % (node, msg),
3420 errors.ECODE_ENVIRON)
3421 node_helper = helpers[node].payload
3422 if node_helper != self.op.drbd_helper:
3423 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
3424 (node, node_helper), errors.ECODE_ENVIRON)
3426 self.cluster = cluster = self.cfg.GetClusterInfo()
3427 # validate params changes
3428 if self.op.beparams:
3429 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
3430 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
3432 if self.op.ndparams:
3433 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3434 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
3436 # TODO: we need a more general way to handle resetting
3437 # cluster-level parameters to default values
3438 if self.new_ndparams["oob_program"] == "":
3439 self.new_ndparams["oob_program"] = \
3440 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
3442 if self.op.nicparams:
3443 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
3444 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
3445 objects.NIC.CheckParameterSyntax(self.new_nicparams)
3448 # check all instances for consistency
3449 for instance in self.cfg.GetAllInstancesInfo().values():
3450 for nic_idx, nic in enumerate(instance.nics):
3451 params_copy = copy.deepcopy(nic.nicparams)
3452 params_filled = objects.FillDict(self.new_nicparams, params_copy)
3454 # check parameter syntax
3456 objects.NIC.CheckParameterSyntax(params_filled)
3457 except errors.ConfigurationError, err:
3458 nic_errors.append("Instance %s, nic/%d: %s" %
3459 (instance.name, nic_idx, err))
3461 # if we're moving instances to routed, check that they have an ip
3462 target_mode = params_filled[constants.NIC_MODE]
3463 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
3464 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
3465 " address" % (instance.name, nic_idx))
3467 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
3468 "\n".join(nic_errors))
3470 # hypervisor list/parameters
3471 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
3472 if self.op.hvparams:
3473 for hv_name, hv_dict in self.op.hvparams.items():
3474 if hv_name not in self.new_hvparams:
3475 self.new_hvparams[hv_name] = hv_dict
3477 self.new_hvparams[hv_name].update(hv_dict)
3479 # os hypervisor parameters
3480 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
3482 for os_name, hvs in self.op.os_hvp.items():
3483 if os_name not in self.new_os_hvp:
3484 self.new_os_hvp[os_name] = hvs
3486 for hv_name, hv_dict in hvs.items():
3487 if hv_name not in self.new_os_hvp[os_name]:
3488 self.new_os_hvp[os_name][hv_name] = hv_dict
3490 self.new_os_hvp[os_name][hv_name].update(hv_dict)
3493 self.new_osp = objects.FillDict(cluster.osparams, {})
3494 if self.op.osparams:
3495 for os_name, osp in self.op.osparams.items():
3496 if os_name not in self.new_osp:
3497 self.new_osp[os_name] = {}
3499 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
3502 if not self.new_osp[os_name]:
3503 # we removed all parameters
3504 del self.new_osp[os_name]
3506 # check the parameter validity (remote check)
3507 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
3508 os_name, self.new_osp[os_name])
3510 # changes to the hypervisor list
3511 if self.op.enabled_hypervisors is not None:
3512 self.hv_list = self.op.enabled_hypervisors
3513 for hv in self.hv_list:
3514 # if the hypervisor doesn't already exist in the cluster
3515 # hvparams, we initialize it to empty, and then (in both
3516 # cases) we make sure to fill the defaults, as we might not
3517 # have a complete defaults list if the hypervisor wasn't
3519 if hv not in new_hvp:
3521 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
3522 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
3524 self.hv_list = cluster.enabled_hypervisors
3526 if self.op.hvparams or self.op.enabled_hypervisors is not None:
3527 # either the enabled list has changed, or the parameters have, validate
3528 for hv_name, hv_params in self.new_hvparams.items():
3529 if ((self.op.hvparams and hv_name in self.op.hvparams) or
3530 (self.op.enabled_hypervisors and
3531 hv_name in self.op.enabled_hypervisors)):
3532 # either this is a new hypervisor, or its parameters have changed
3533 hv_class = hypervisor.GetHypervisor(hv_name)
3534 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
3535 hv_class.CheckParameterSyntax(hv_params)
3536 _CheckHVParams(self, node_list, hv_name, hv_params)
3539 # no need to check any newly-enabled hypervisors, since the
3540 # defaults have already been checked in the above code-block
3541 for os_name, os_hvp in self.new_os_hvp.items():
3542 for hv_name, hv_params in os_hvp.items():
3543 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
3544 # we need to fill in the new os_hvp on top of the actual hv_p
3545 cluster_defaults = self.new_hvparams.get(hv_name, {})
3546 new_osp = objects.FillDict(cluster_defaults, hv_params)
3547 hv_class = hypervisor.GetHypervisor(hv_name)
3548 hv_class.CheckParameterSyntax(new_osp)
3549 _CheckHVParams(self, node_list, hv_name, new_osp)
3551 if self.op.default_iallocator:
3552 alloc_script = utils.FindFile(self.op.default_iallocator,
3553 constants.IALLOCATOR_SEARCH_PATH,
3555 if alloc_script is None:
3556 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
3557 " specified" % self.op.default_iallocator,
3560 def Exec(self, feedback_fn):
3561 """Change the parameters of the cluster.
3564 if self.op.vg_name is not None:
3565 new_volume = self.op.vg_name
3568 if new_volume != self.cfg.GetVGName():
3569 self.cfg.SetVGName(new_volume)
3571 feedback_fn("Cluster LVM configuration already in desired"
3572 " state, not changing")
3573 if self.op.drbd_helper is not None:
3574 new_helper = self.op.drbd_helper
3577 if new_helper != self.cfg.GetDRBDHelper():
3578 self.cfg.SetDRBDHelper(new_helper)
3580 feedback_fn("Cluster DRBD helper already in desired state,"
3582 if self.op.hvparams:
3583 self.cluster.hvparams = self.new_hvparams
3585 self.cluster.os_hvp = self.new_os_hvp
3586 if self.op.enabled_hypervisors is not None:
3587 self.cluster.hvparams = self.new_hvparams
3588 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
3589 if self.op.beparams:
3590 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
3591 if self.op.nicparams:
3592 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
3593 if self.op.osparams:
3594 self.cluster.osparams = self.new_osp
3595 if self.op.ndparams:
3596 self.cluster.ndparams = self.new_ndparams
3598 if self.op.candidate_pool_size is not None:
3599 self.cluster.candidate_pool_size = self.op.candidate_pool_size
3600 # we need to update the pool size here, otherwise the save will fail
3601 _AdjustCandidatePool(self, [])
3603 if self.op.maintain_node_health is not None:
3604 self.cluster.maintain_node_health = self.op.maintain_node_health
3606 if self.op.prealloc_wipe_disks is not None:
3607 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
3609 if self.op.add_uids is not None:
3610 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
3612 if self.op.remove_uids is not None:
3613 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
3615 if self.op.uid_pool is not None:
3616 self.cluster.uid_pool = self.op.uid_pool
3618 if self.op.default_iallocator is not None:
3619 self.cluster.default_iallocator = self.op.default_iallocator
3621 if self.op.reserved_lvs is not None:
3622 self.cluster.reserved_lvs = self.op.reserved_lvs
3624 def helper_os(aname, mods, desc):
3626 lst = getattr(self.cluster, aname)
3627 for key, val in mods:
3628 if key == constants.DDM_ADD:
3630 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
3633 elif key == constants.DDM_REMOVE:
3637 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3639 raise errors.ProgrammerError("Invalid modification '%s'" % key)
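    # Illustrative sketch (added for clarity, not part of the original
    # module): the mods lists handled by helper_os are (action, os_name)
    # pairs; a hypothetical opcode value:
    #
    #   self.op.hidden_os = [(constants.DDM_ADD, "lenny-image"),
    #                        (constants.DDM_REMOVE, "old-image")]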
3641 if self.op.hidden_os:
3642 helper_os("hidden_os", self.op.hidden_os, "hidden")
3644 if self.op.blacklisted_os:
3645 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3647 if self.op.master_netdev:
3648 master = self.cfg.GetMasterNode()
3649 feedback_fn("Shutting down master ip on the current netdev (%s)" %
3650 self.cluster.master_netdev)
3651 result = self.rpc.call_node_deactivate_master_ip(master)
3652 result.Raise("Could not disable the master ip")
3653 feedback_fn("Changing master_netdev from %s to %s" %
3654 (self.cluster.master_netdev, self.op.master_netdev))
3655 self.cluster.master_netdev = self.op.master_netdev
3657 self.cfg.Update(self.cluster, feedback_fn)
3659 if self.op.master_netdev:
3660 feedback_fn("Starting the master ip on the new master netdev (%s)" %
3661 self.op.master_netdev)
3662 result = self.rpc.call_node_activate_master_ip(master)
3664 self.LogWarning("Could not re-enable the master ip on"
3665 " the master, please restart manually: %s",
3669 def _UploadHelper(lu, nodes, fname):
3670 """Helper for uploading a file and showing warnings.
3673 if os.path.exists(fname):
3674 result = lu.rpc.call_upload_file(nodes, fname)
3675 for to_node, to_result in result.items():
3676 msg = to_result.fail_msg
3678 msg = ("Copy of file %s to node %s failed: %s" %
3679 (fname, to_node, msg))
3680 lu.proc.LogWarning(msg)
3683 def _ComputeAncillaryFiles(cluster, redist):
3684 """Compute files external to Ganeti which need to be consistent.
3686 @type redist: boolean
3687 @param redist: Whether to include files which need to be redistributed
3690 # Compute files for all nodes
3692 constants.SSH_KNOWN_HOSTS_FILE,
3693 constants.CONFD_HMAC_KEY,
3694 constants.CLUSTER_DOMAIN_SECRET_FILE,
3698 files_all.update(constants.ALL_CERT_FILES)
3699 files_all.update(ssconf.SimpleStore().GetFileList())
3701 if cluster.modify_etc_hosts:
3702 files_all.add(constants.ETC_HOSTS)
3704 # Files which must either exist on all nodes or on none
3705 files_all_opt = set([
3706 constants.RAPI_USERS_FILE,
3709 # Files which should only be on master candidates
3712 files_mc.add(constants.CLUSTER_CONF_FILE)
3714 # Files which should only be on VM-capable nodes
3715 files_vm = set(filename
3716 for hv_name in cluster.enabled_hypervisors
3717 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())
3719 # Filenames must be unique
3720 assert (len(files_all | files_all_opt | files_mc | files_vm) ==
3721 sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
3722 "Found file listed in more than one file list"
3724 return (files_all, files_all_opt, files_mc, files_vm)
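# Illustrative sketch, not upstream code: the entries in "files_all_opt"
# returned above must either exist on every node or on none of them. The
# hypothetical helper below shows one way such an all-or-none property could
# be verified from a {node_name: file_exists} mapping collected elsewhere.
def _ExampleAllOrNone(existence_map):
  """Returns True if all values in the mapping agree (all True or all False).

  """
  return len(frozenset(existence_map.values())) <= 1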
3727 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3728 """Distribute additional files which are part of the cluster configuration.
3730 ConfigWriter takes care of distributing the config and ssconf files, but
3731 there are more files which should be distributed to all nodes. This function
3732 makes sure those are copied.
3734 @param lu: calling logical unit
3735 @param additional_nodes: list of nodes not in the config to distribute to
3736 @type additional_vm: boolean
3737 @param additional_vm: whether the additional nodes are vm-capable or not
3740 # Gather target nodes
3741 cluster = lu.cfg.GetClusterInfo()
3742 master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3744 online_nodes = lu.cfg.GetOnlineNodeList()
3745 vm_nodes = lu.cfg.GetVmCapableNodeList()
3747 if additional_nodes is not None:
3748 online_nodes.extend(additional_nodes)
3750 vm_nodes.extend(additional_nodes)
3752 # Never distribute to master node
3753 for nodelist in [online_nodes, vm_nodes]:
3754 if master_info.name in nodelist:
3755 nodelist.remove(master_info.name)
3758 (files_all, files_all_opt, files_mc, files_vm) = \
3759 _ComputeAncillaryFiles(cluster, True)
3761 # Never re-distribute configuration file from here
3762 assert not (constants.CLUSTER_CONF_FILE in files_all or
3763 constants.CLUSTER_CONF_FILE in files_vm)
3764 assert not files_mc, "Master candidates not handled in this function"
3767 (online_nodes, files_all),
3768 (online_nodes, files_all_opt),
3769 (vm_nodes, files_vm),
3773 for (node_list, files) in filemap:
3775 _UploadHelper(lu, node_list, fname)
3778 class LUClusterRedistConf(NoHooksLU):
3779 """Force the redistribution of cluster configuration.
3781 This is a very simple LU.
3786 def ExpandNames(self):
3787 self.needed_locks = {
3788 locking.LEVEL_NODE: locking.ALL_SET,
3790 self.share_locks[locking.LEVEL_NODE] = 1
3792 def Exec(self, feedback_fn):
3793 """Redistribute the configuration.
3796 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3797 _RedistributeAncillaryFiles(self)
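# Illustrative note, not upstream code: other logical units normally trigger
# a redistribution by submitting the matching opcode instead of invoking the
# LU directly. The opcode class name used below (opcodes.OpClusterRedistConf)
# is assumed from the usual LU/opcode naming convention and may differ
# between versions.
def _ExampleRedistJob():
  """Returns a one-opcode job forcing a config redistribution (sketch).

  """
  return [opcodes.OpClusterRedistConf()]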
3800 def _WaitForSync(lu, instance, disks=None, oneshot=False):
3801 """Sleep and poll for an instance's disk to sync.
3804 if not instance.disks or disks is not None and not disks:
3807 disks = _ExpandCheckDisks(instance, disks)
3810 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3812 node = instance.primary_node
3815 lu.cfg.SetDiskID(dev, node)
3817 # TODO: Convert to utils.Retry
3820 degr_retries = 10 # in seconds, as we sleep 1 second each time
3824 cumul_degraded = False
3825 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3826 msg = rstats.fail_msg
3828 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3831 raise errors.RemoteError("Can't contact node %s for mirror data,"
3832 " aborting." % node)
3835 rstats = rstats.payload
3837 for i, mstat in enumerate(rstats):
3839 lu.LogWarning("Can't compute data for node %s/%s",
3840 node, disks[i].iv_name)
3843 cumul_degraded = (cumul_degraded or
3844 (mstat.is_degraded and mstat.sync_percent is None))
3845 if mstat.sync_percent is not None:
3847 if mstat.estimated_time is not None:
3848 rem_time = ("%s remaining (estimated)" %
3849 utils.FormatSeconds(mstat.estimated_time))
3850 max_time = mstat.estimated_time
3852 rem_time = "no time estimate"
3853 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3854 (disks[i].iv_name, mstat.sync_percent, rem_time))
3856 # if we're done but degraded, let's do a few small retries, to
3857 # make sure we see a stable and not transient situation; therefore
3858 # we force restart of the loop
3859 if (done or oneshot) and cumul_degraded and degr_retries > 0:
3860 logging.info("Degraded disks found, %d retries left", degr_retries)
3868 time.sleep(min(60, max_time))
3871 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3872 return not cumul_degraded
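# Illustrative sketch, not upstream code: _WaitForSync above forces a few
# extra iterations when the mirrors report "done but degraded", so that a
# transient degraded state is not mistaken for a permanent one. The
# hypothetical helper below isolates that retry pattern; check_fn should
# return True while the state is still degraded.
def _ExampleRetryWhileDegraded(check_fn, retries=10):
  """Re-checks a degraded condition up to 'retries' times, one second apart.

  """
  while check_fn():
    if retries <= 0:
      return False # still degraded after all retries
    retries -= 1
    time.sleep(1)
  return True # condition cleared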
3875 def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3876 """Check that mirrors are not degraded.
3878 The ldisk parameter, if True, will change the test from the
3879 is_degraded attribute (which represents overall non-ok status for
3880 the device(s)) to the ldisk (representing the local storage status).
3883 lu.cfg.SetDiskID(dev, node)
3887 if on_primary or dev.AssembleOnSecondary():
3888 rstats = lu.rpc.call_blockdev_find(node, dev)
3889 msg = rstats.fail_msg
3891 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3893 elif not rstats.payload:
3894 lu.LogWarning("Can't find disk on node %s", node)
3898 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3900 result = result and not rstats.payload.is_degraded
3903 for child in dev.children:
3904 result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3909 class LUOobCommand(NoHooksLU):
3910 """Logical unit for OOB handling.
3914 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
3916 def ExpandNames(self):
3917 """Gather locks we need.
3920 if self.op.node_names:
3921 self.op.node_names = _GetWantedNodes(self, self.op.node_names)
3922 lock_names = self.op.node_names
3924 lock_names = locking.ALL_SET
3926 self.needed_locks = {
3927 locking.LEVEL_NODE: lock_names,
3930 def CheckPrereq(self):
3931 """Check prerequisites.
3934 - the node exists in the configuration
3937 Any errors are signaled by raising errors.OpPrereqError.
3941 self.master_node = self.cfg.GetMasterNode()
3943 assert self.op.power_delay >= 0.0
3945 if self.op.node_names:
3946 if (self.op.command in self._SKIP_MASTER and
3947 self.master_node in self.op.node_names):
3948 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
3949 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
3951 if master_oob_handler:
3952 additional_text = ("run '%s %s %s' if you want to operate on the"
3953 " master regardless") % (master_oob_handler,
3957 additional_text = "it does not support out-of-band operations"
3959 raise errors.OpPrereqError(("Operating on the master node %s is not"
3960 " allowed for %s; %s") %
3961 (self.master_node, self.op.command,
3962 additional_text), errors.ECODE_INVAL)
3964 self.op.node_names = self.cfg.GetNodeList()
3965 if self.op.command in self._SKIP_MASTER:
3966 self.op.node_names.remove(self.master_node)
3968 if self.op.command in self._SKIP_MASTER:
3969 assert self.master_node not in self.op.node_names
3971 for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
3973 raise errors.OpPrereqError("Node %s not found" % node_name,
3976 self.nodes.append(node)
3978 if (not self.op.ignore_status and
3979 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
3980 raise errors.OpPrereqError(("Cannot power off node %s because it is"
3981 " not marked offline") % node_name,
3984 def Exec(self, feedback_fn):
3985 """Execute OOB and return result if we expect any.
3988 master_node = self.master_node
3991 for idx, node in enumerate(utils.NiceSort(self.nodes,
3992 key=lambda node: node.name)):
3993 node_entry = [(constants.RS_NORMAL, node.name)]
3994 ret.append(node_entry)
3996 oob_program = _SupportsOob(self.cfg, node)
3999 node_entry.append((constants.RS_UNAVAIL, None))
4002 logging.info("Executing out-of-band command '%s' using '%s' on %s",
4003 self.op.command, oob_program, node.name)
4004 result = self.rpc.call_run_oob(master_node, oob_program,
4005 self.op.command, node.name,
4009 self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4010 node.name, result.fail_msg)
4011 node_entry.append((constants.RS_NODATA, None))
4014 self._CheckPayload(result)
4015 except errors.OpExecError, err:
4016 self.LogWarning("Payload returned by node '%s' is not valid: %s",
4018 node_entry.append((constants.RS_NODATA, None))
4020 if self.op.command == constants.OOB_HEALTH:
4021 # For health we should log important events
4022 for item, status in result.payload:
4023 if status in [constants.OOB_STATUS_WARNING,
4024 constants.OOB_STATUS_CRITICAL]:
4025 self.LogWarning("Item '%s' on node '%s' has status '%s'",
4026 item, node.name, status)
4028 if self.op.command == constants.OOB_POWER_ON:
4030 elif self.op.command == constants.OOB_POWER_OFF:
4031 node.powered = False
4032 elif self.op.command == constants.OOB_POWER_STATUS:
4033 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4034 if powered != node.powered:
4035 logging.warning(("Recorded power state (%s) of node '%s' does not"
4036 " match actual power state (%s)"), node.powered,
4039 # For configuration changing commands we should update the node
4040 if self.op.command in (constants.OOB_POWER_ON,
4041 constants.OOB_POWER_OFF):
4042 self.cfg.Update(node, feedback_fn)
4044 node_entry.append((constants.RS_NORMAL, result.payload))
4046 if (self.op.command == constants.OOB_POWER_ON and
4047 idx < len(self.nodes) - 1):
4048 time.sleep(self.op.power_delay)
4052 def _CheckPayload(self, result):
4053 """Checks if the payload is valid.
4055 @param result: RPC result
4056 @raises errors.OpExecError: If payload is not valid
4060 if self.op.command == constants.OOB_HEALTH:
4061 if not isinstance(result.payload, list):
4062 errs.append("command 'health' is expected to return a list but got %s" %
4063 type(result.payload))
4065 for item, status in result.payload:
4066 if status not in constants.OOB_STATUSES:
4067 errs.append("health item '%s' has invalid status '%s'" %
4070 if self.op.command == constants.OOB_POWER_STATUS:
4071 if not isinstance(result.payload, dict):
4072 errs.append("power-status is expected to return a dict but got %s" %
4073 type(result.payload))
4075 if self.op.command in [
4076 constants.OOB_POWER_ON,
4077 constants.OOB_POWER_OFF,
4078 constants.OOB_POWER_CYCLE,
4080 if result.payload is not None:
4081 errs.append("%s is expected to not return payload but got '%s'" %
4082 (self.op.command, result.payload))
4085 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4086 utils.CommaJoin(errs))
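# Illustrative sketch, not upstream code: the OOB "health" payload validated
# above is a list of (item, status) pairs. A hypothetical filter for the
# entries that deserve a warning could look like this.
def _ExampleBadHealthItems(payload):
  """Returns the (item, status) pairs with warning or critical status.

  """
  bad = (constants.OOB_STATUS_WARNING, constants.OOB_STATUS_CRITICAL)
  return [(item, status) for (item, status) in payload if status in bad]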
4089 class _OsQuery(_QueryBase):
4090 FIELDS = query.OS_FIELDS
4092 def ExpandNames(self, lu):
4093 # Lock all nodes in shared mode
4094 # Temporary removal of locks, should be reverted later
4095 # TODO: reintroduce locks when they are lighter-weight
4096 lu.needed_locks = {}
4097 #self.share_locks[locking.LEVEL_NODE] = 1
4098 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4100 # The following variables interact with _QueryBase._GetNames
4102 self.wanted = self.names
4104 self.wanted = locking.ALL_SET
4106 self.do_locking = self.use_locking
4108 def DeclareLocks(self, lu, level):
4112 def _DiagnoseByOS(rlist):
4113 """Remaps a per-node return list into an a per-os per-node dictionary
4115 @param rlist: a map with node names as keys and OS objects as values
4118 @return: a dictionary with osnames as keys and as value another
4119 map, with nodes as keys and tuples of (path, status, diagnose,
4120 variants, parameters, api_versions) as values, eg::
4122 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4123 (/srv/..., False, "invalid api")],
4124 "node2": [(/srv/..., True, "", [], [])]}
4129 # we build here the list of nodes that didn't fail the RPC (at RPC
4130 # level), so that nodes with a non-responding node daemon don't
4131 # make all OSes invalid
4132 good_nodes = [node_name for node_name in rlist
4133 if not rlist[node_name].fail_msg]
4134 for node_name, nr in rlist.items():
4135 if nr.fail_msg or not nr.payload:
4137 for (name, path, status, diagnose, variants,
4138 params, api_versions) in nr.payload:
4139 if name not in all_os:
4140 # build a list of nodes for this os containing empty lists
4141 # for each node in node_list
4143 for nname in good_nodes:
4144 all_os[name][nname] = []
4145 # convert params from [name, help] to (name, help)
4146 params = [tuple(v) for v in params]
4147 all_os[name][node_name].append((path, status, diagnose,
4148 variants, params, api_versions))
4151 def _GetQueryData(self, lu):
4152 """Computes the list of nodes and their attributes.
4155 # Locking is not used
4156 assert not (compat.any(lu.glm.is_owned(level)
4157 for level in locking.LEVELS
4158 if level != locking.LEVEL_CLUSTER) or
4159 self.do_locking or self.use_locking)
4161 valid_nodes = [node.name
4162 for node in lu.cfg.GetAllNodesInfo().values()
4163 if not node.offline and node.vm_capable]
4164 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4165 cluster = lu.cfg.GetClusterInfo()
4169 for (os_name, os_data) in pol.items():
4170 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4171 hidden=(os_name in cluster.hidden_os),
4172 blacklisted=(os_name in cluster.blacklisted_os))
4176 api_versions = set()
4178 for idx, osl in enumerate(os_data.values()):
4179 info.valid = bool(info.valid and osl and osl[0][1])
4183 (node_variants, node_params, node_api) = osl[0][3:6]
4186 variants.update(node_variants)
4187 parameters.update(node_params)
4188 api_versions.update(node_api)
4190 # Filter out inconsistent values
4191 variants.intersection_update(node_variants)
4192 parameters.intersection_update(node_params)
4193 api_versions.intersection_update(node_api)
4195 info.variants = list(variants)
4196 info.parameters = list(parameters)
4197 info.api_versions = list(api_versions)
4199 data[os_name] = info
4201 # Prepare data in requested order
4202 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4206 class LUOsDiagnose(NoHooksLU):
4207 """Logical unit for OS diagnose/query.
4213 def _BuildFilter(fields, names):
4214 """Builds a filter for querying OSes.
4217 name_filter = qlang.MakeSimpleFilter("name", names)
4219 # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
4220 # respective field is not requested
4221 status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
4222 for fname in ["hidden", "blacklisted"]
4223 if fname not in fields]
4224 if "valid" not in fields:
4225 status_filter.append([qlang.OP_TRUE, "valid"])
4228 status_filter.insert(0, qlang.OP_AND)
4230 status_filter = None
4232 if name_filter and status_filter:
4233 return [qlang.OP_AND, name_filter, status_filter]
4237 return status_filter
4239 def CheckArguments(self):
4240 self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
4241 self.op.output_fields, False)
4243 def ExpandNames(self):
4244 self.oq.ExpandNames(self)
4246 def Exec(self, feedback_fn):
4247 return self.oq.OldStyleQuery(self)
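# Illustrative sketch, not upstream code: when neither "hidden" nor
# "blacklisted" nor "valid" is among the requested fields and no names are
# given, the legacy filter built by LUOsDiagnose._BuildFilter above reduces
# to the structure returned by this hypothetical helper.
def _ExampleDefaultOsFilter():
  """Returns the status filter hiding hidden/blacklisted/invalid OSes.

  """
  return [qlang.OP_AND,
          [qlang.OP_NOT, [qlang.OP_TRUE, "hidden"]],
          [qlang.OP_NOT, [qlang.OP_TRUE, "blacklisted"]],
          [qlang.OP_TRUE, "valid"]]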
4250 class LUNodeRemove(LogicalUnit):
4251 """Logical unit for removing a node.
4254 HPATH = "node-remove"
4255 HTYPE = constants.HTYPE_NODE
4257 def BuildHooksEnv(self):
4260 This doesn't run on the target node in the pre phase as a failed
4261 node would then be impossible to remove.
4265 "OP_TARGET": self.op.node_name,
4266 "NODE_NAME": self.op.node_name,
4269 def BuildHooksNodes(self):
4270 """Build hooks nodes.
4273 all_nodes = self.cfg.GetNodeList()
4275 all_nodes.remove(self.op.node_name)
4277 logging.warning("Node '%s', which is about to be removed, was not found"
4278 " in the list of all nodes", self.op.node_name)
4279 return (all_nodes, all_nodes)
4281 def CheckPrereq(self):
4282 """Check prerequisites.
4285 - the node exists in the configuration
4286 - it does not have primary or secondary instances
4287 - it's not the master
4289 Any errors are signaled by raising errors.OpPrereqError.
4292 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4293 node = self.cfg.GetNodeInfo(self.op.node_name)
4294 assert node is not None
4296 masternode = self.cfg.GetMasterNode()
4297 if node.name == masternode:
4298 raise errors.OpPrereqError("Node is the master node, failover to another"
4299 " node is required", errors.ECODE_INVAL)
4301 for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
4302 if node.name in instance.all_nodes:
4303 raise errors.OpPrereqError("Instance %s is still running on the node,"
4304 " please remove first" % instance_name,
4306 self.op.node_name = node.name
4309 def Exec(self, feedback_fn):
4310 """Removes the node from the cluster.
4314 logging.info("Stopping the node daemon and removing configs from node %s",
4317 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
4319 # Promote nodes to master candidate as needed
4320 _AdjustCandidatePool(self, exceptions=[node.name])
4321 self.context.RemoveNode(node.name)
4323 # Run post hooks on the node before it's removed
4324 _RunPostHook(self, node.name)
4326 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
4327 msg = result.fail_msg
4329 self.LogWarning("Errors encountered on the remote node while leaving"
4330 " the cluster: %s", msg)
4332 # Remove node from our /etc/hosts
4333 if self.cfg.GetClusterInfo().modify_etc_hosts:
4334 master_node = self.cfg.GetMasterNode()
4335 result = self.rpc.call_etc_hosts_modify(master_node,
4336 constants.ETC_HOSTS_REMOVE,
4338 result.Raise("Can't update hosts file with new host data")
4339 _RedistributeAncillaryFiles(self)
4342 class _NodeQuery(_QueryBase):
4343 FIELDS = query.NODE_FIELDS
4345 def ExpandNames(self, lu):
4346 lu.needed_locks = {}
4347 lu.share_locks = _ShareAll()
4350 self.wanted = _GetWantedNodes(lu, self.names)
4352 self.wanted = locking.ALL_SET
4354 self.do_locking = (self.use_locking and
4355 query.NQ_LIVE in self.requested_data)
4358 # If any non-static field is requested we need to lock the nodes
4359 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
4361 def DeclareLocks(self, lu, level):
4364 def _GetQueryData(self, lu):
4365 """Computes the list of nodes and their attributes.
4368 all_info = lu.cfg.GetAllNodesInfo()
4370 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
4372 # Gather data as requested
4373 if query.NQ_LIVE in self.requested_data:
4374 # filter out non-vm_capable nodes
4375 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
4377 node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
4378 lu.cfg.GetHypervisorType())
4379 live_data = dict((name, nresult.payload)
4380 for (name, nresult) in node_data.items()
4381 if not nresult.fail_msg and nresult.payload)
4385 if query.NQ_INST in self.requested_data:
4386 node_to_primary = dict([(name, set()) for name in nodenames])
4387 node_to_secondary = dict([(name, set()) for name in nodenames])
4389 inst_data = lu.cfg.GetAllInstancesInfo()
4391 for inst in inst_data.values():
4392 if inst.primary_node in node_to_primary:
4393 node_to_primary[inst.primary_node].add(inst.name)
4394 for secnode in inst.secondary_nodes:
4395 if secnode in node_to_secondary:
4396 node_to_secondary[secnode].add(inst.name)
4398 node_to_primary = None
4399 node_to_secondary = None
4401 if query.NQ_OOB in self.requested_data:
4402 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
4403 for name, node in all_info.iteritems())
4407 if query.NQ_GROUP in self.requested_data:
4408 groups = lu.cfg.GetAllNodeGroupsInfo()
4412 return query.NodeQueryData([all_info[name] for name in nodenames],
4413 live_data, lu.cfg.GetMasterNode(),
4414 node_to_primary, node_to_secondary, groups,
4415 oob_support, lu.cfg.GetClusterInfo())
4418 class LUNodeQuery(NoHooksLU):
4419 """Logical unit for querying nodes.
4422 # pylint: disable=W0142
4425 def CheckArguments(self):
4426 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
4427 self.op.output_fields, self.op.use_locking)
4429 def ExpandNames(self):
4430 self.nq.ExpandNames(self)
4432 def Exec(self, feedback_fn):
4433 return self.nq.OldStyleQuery(self)
4436 class LUNodeQueryvols(NoHooksLU):
4437 """Logical unit for getting volumes on node(s).
4441 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
4442 _FIELDS_STATIC = utils.FieldSet("node")
4444 def CheckArguments(self):
4445 _CheckOutputFields(static=self._FIELDS_STATIC,
4446 dynamic=self._FIELDS_DYNAMIC,
4447 selected=self.op.output_fields)
4449 def ExpandNames(self):
4450 self.needed_locks = {}
4451 self.share_locks[locking.LEVEL_NODE] = 1
4452 if not self.op.nodes:
4453 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4455 self.needed_locks[locking.LEVEL_NODE] = \
4456 _GetWantedNodes(self, self.op.nodes)
4458 def Exec(self, feedback_fn):
4459 """Computes the list of nodes and their attributes.
4462 nodenames = self.owned_locks(locking.LEVEL_NODE)
4463 volumes = self.rpc.call_node_volumes(nodenames)
4465 ilist = self.cfg.GetAllInstancesInfo()
4466 vol2inst = _MapInstanceDisksToNodes(ilist.values())
4469 for node in nodenames:
4470 nresult = volumes[node]
4473 msg = nresult.fail_msg
4475 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
4478 node_vols = sorted(nresult.payload,
4479 key=operator.itemgetter("dev"))
4481 for vol in node_vols:
4483 for field in self.op.output_fields:
4486 elif field == "phys":
4490 elif field == "name":
4492 elif field == "size":
4493 val = int(float(vol["size"]))
4494 elif field == "instance":
4495 val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
4497 raise errors.ParameterError(field)
4498 node_output.append(str(val))
4500 output.append(node_output)
4505 class LUNodeQueryStorage(NoHooksLU):
4506 """Logical unit for getting information on storage units on node(s).
4509 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
4512 def CheckArguments(self):
4513 _CheckOutputFields(static=self._FIELDS_STATIC,
4514 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
4515 selected=self.op.output_fields)
4517 def ExpandNames(self):
4518 self.needed_locks = {}
4519 self.share_locks[locking.LEVEL_NODE] = 1
4522 self.needed_locks[locking.LEVEL_NODE] = \
4523 _GetWantedNodes(self, self.op.nodes)
4525 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4527 def Exec(self, feedback_fn):
4528 """Computes the list of nodes and their attributes.
4531 self.nodes = self.owned_locks(locking.LEVEL_NODE)
4533 # Always get name to sort by
4534 if constants.SF_NAME in self.op.output_fields:
4535 fields = self.op.output_fields[:]
4537 fields = [constants.SF_NAME] + self.op.output_fields
4539 # Never ask for node or type as it's only known to the LU
4540 for extra in [constants.SF_NODE, constants.SF_TYPE]:
4541 while extra in fields:
4542 fields.remove(extra)
4544 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
4545 name_idx = field_idx[constants.SF_NAME]
4547 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4548 data = self.rpc.call_storage_list(self.nodes,
4549 self.op.storage_type, st_args,
4550 self.op.name, fields)
4554 for node in utils.NiceSort(self.nodes):
4555 nresult = data[node]
4559 msg = nresult.fail_msg
4561 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
4564 rows = dict([(row[name_idx], row) for row in nresult.payload])
4566 for name in utils.NiceSort(rows.keys()):
4571 for field in self.op.output_fields:
4572 if field == constants.SF_NODE:
4574 elif field == constants.SF_TYPE:
4575 val = self.op.storage_type
4576 elif field in field_idx:
4577 val = row[field_idx[field]]
4579 raise errors.ParameterError(field)
4588 class _InstanceQuery(_QueryBase):
4589 FIELDS = query.INSTANCE_FIELDS
4591 def ExpandNames(self, lu):
4592 lu.needed_locks = {}
4593 lu.share_locks = _ShareAll()
4596 self.wanted = _GetWantedInstances(lu, self.names)
4598 self.wanted = locking.ALL_SET
4600 self.do_locking = (self.use_locking and
4601 query.IQ_LIVE in self.requested_data)
4603 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4604 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
4605 lu.needed_locks[locking.LEVEL_NODE] = []
4606 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4608 self.do_grouplocks = (self.do_locking and
4609 query.IQ_NODES in self.requested_data)
4611 def DeclareLocks(self, lu, level):
4613 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
4614 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
4616 # Lock all groups used by instances optimistically; this requires going
4617 # via the node before it's locked, requiring verification later on
4618 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
4620 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
4621 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
4622 elif level == locking.LEVEL_NODE:
4623 lu._LockInstancesNodes() # pylint: disable=W0212
4626 def _CheckGroupLocks(lu):
4627 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
4628 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
4630 # Check if node groups for locked instances are still correct
4631 for instance_name in owned_instances:
4632 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
4634 def _GetQueryData(self, lu):
4635 """Computes the list of instances and their attributes.
4638 if self.do_grouplocks:
4639 self._CheckGroupLocks(lu)
4641 cluster = lu.cfg.GetClusterInfo()
4642 all_info = lu.cfg.GetAllInstancesInfo()
4644 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
4646 instance_list = [all_info[name] for name in instance_names]
4647 nodes = frozenset(itertools.chain(*(inst.all_nodes
4648 for inst in instance_list)))
4649 hv_list = list(set([inst.hypervisor for inst in instance_list]))
4652 wrongnode_inst = set()
4654 # Gather data as requested
4655 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
4657 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
4659 result = node_data[name]
4661 # offline nodes will be in both lists
4662 assert result.fail_msg
4663 offline_nodes.append(name)
4665 bad_nodes.append(name)
4666 elif result.payload:
4667 for inst in result.payload:
4668 if inst in all_info:
4669 if all_info[inst].primary_node == name:
4670 live_data.update(result.payload)
4672 wrongnode_inst.add(inst)
4674 # orphan instance; we don't list it here as we don't
4675 # handle this case yet in the output of instance listing
4676 logging.warning("Orphan instance '%s' found on node %s",
4678 # else no instance is alive
4682 if query.IQ_DISKUSAGE in self.requested_data:
4683 disk_usage = dict((inst.name,
4684 _ComputeDiskSize(inst.disk_template,
4685 [{constants.IDISK_SIZE: disk.size}
4686 for disk in inst.disks]))
4687 for inst in instance_list)
4691 if query.IQ_CONSOLE in self.requested_data:
4693 for inst in instance_list:
4694 if inst.name in live_data:
4695 # Instance is running
4696 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
4698 consinfo[inst.name] = None
4699 assert set(consinfo.keys()) == set(instance_names)
4703 if query.IQ_NODES in self.requested_data:
4704 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
4706 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
4707 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
4708 for uuid in set(map(operator.attrgetter("group"),
4714 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
4715 disk_usage, offline_nodes, bad_nodes,
4716 live_data, wrongnode_inst, consinfo,
4720 class LUQuery(NoHooksLU):
4721 """Query for resources/items of a certain kind.
4724 # pylint: disable=W0142
4727 def CheckArguments(self):
4728 qcls = _GetQueryImplementation(self.op.what)
4730 self.impl = qcls(self.op.filter, self.op.fields, self.op.use_locking)
4732 def ExpandNames(self):
4733 self.impl.ExpandNames(self)
4735 def DeclareLocks(self, level):
4736 self.impl.DeclareLocks(self, level)
4738 def Exec(self, feedback_fn):
4739 return self.impl.NewStyleQuery(self)
4742 class LUQueryFields(NoHooksLU):
4743 """Query for resources/items of a certain kind.
4746 # pylint: disable=W0142
4749 def CheckArguments(self):
4750 self.qcls = _GetQueryImplementation(self.op.what)
4752 def ExpandNames(self):
4753 self.needed_locks = {}
4755 def Exec(self, feedback_fn):
4756 return query.QueryFields(self.qcls.FIELDS, self.op.fields)
4759 class LUNodeModifyStorage(NoHooksLU):
4760 """Logical unit for modifying a storage volume on a node.
4765 def CheckArguments(self):
4766 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4768 storage_type = self.op.storage_type
4771 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
4773 raise errors.OpPrereqError("Storage units of type '%s' can not be"
4774 " modified" % storage_type,
4777 diff = set(self.op.changes.keys()) - modifiable
4779 raise errors.OpPrereqError("The following fields can not be modified for"
4780 " storage units of type '%s': %r" %
4781 (storage_type, list(diff)),
4784 def ExpandNames(self):
4785 self.needed_locks = {
4786 locking.LEVEL_NODE: self.op.node_name,
4789 def Exec(self, feedback_fn):
4790 """Computes the list of nodes and their attributes.
4793 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4794 result = self.rpc.call_storage_modify(self.op.node_name,
4795 self.op.storage_type, st_args,
4796 self.op.name, self.op.changes)
4797 result.Raise("Failed to modify storage unit '%s' on %s" %
4798 (self.op.name, self.op.node_name))
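# Illustrative sketch, not upstream code: LUNodeModifyStorage.CheckArguments
# above rejects any requested change whose key is not listed as modifiable
# for the given storage type. A standalone version of that check could look
# like the hypothetical helper below.
def _ExampleUnmodifiableChanges(storage_type, changes):
  """Returns the change keys that may not be modified for storage_type.

  """
  modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
  return set(changes.keys()) - set(modifiable)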
4801 class LUNodeAdd(LogicalUnit):
4802 """Logical unit for adding node to the cluster.
4806 HTYPE = constants.HTYPE_NODE
4807 _NFLAGS = ["master_capable", "vm_capable"]
4809 def CheckArguments(self):
4810 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
4811 # validate/normalize the node name
4812 self.hostname = netutils.GetHostname(name=self.op.node_name,
4813 family=self.primary_ip_family)
4814 self.op.node_name = self.hostname.name
4816 if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
4817 raise errors.OpPrereqError("Cannot readd the master node",
4820 if self.op.readd and self.op.group:
4821 raise errors.OpPrereqError("Cannot pass a node group when a node is"
4822 " being readded", errors.ECODE_INVAL)
4824 def BuildHooksEnv(self):
4827 This will run on all nodes before, and on all nodes + the new node after.
4831 "OP_TARGET": self.op.node_name,
4832 "NODE_NAME": self.op.node_name,
4833 "NODE_PIP": self.op.primary_ip,
4834 "NODE_SIP": self.op.secondary_ip,
4835 "MASTER_CAPABLE": str(self.op.master_capable),
4836 "VM_CAPABLE": str(self.op.vm_capable),
4839 def BuildHooksNodes(self):
4840 """Build hooks nodes.
4843 # Exclude added node
4844 pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
4845 post_nodes = pre_nodes + [self.op.node_name, ]
4847 return (pre_nodes, post_nodes)
4849 def CheckPrereq(self):
4850 """Check prerequisites.
4853 - the new node is not already in the config
4855 - its parameters (single/dual homed) match the cluster
4857 Any errors are signaled by raising errors.OpPrereqError.
4861 hostname = self.hostname
4862 node = hostname.name
4863 primary_ip = self.op.primary_ip = hostname.ip
4864 if self.op.secondary_ip is None:
4865 if self.primary_ip_family == netutils.IP6Address.family:
4866 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
4867 " IPv4 address must be given as secondary",
4869 self.op.secondary_ip = primary_ip
4871 secondary_ip = self.op.secondary_ip
4872 if not netutils.IP4Address.IsValid(secondary_ip):
4873 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4874 " address" % secondary_ip, errors.ECODE_INVAL)
4876 node_list = cfg.GetNodeList()
4877 if not self.op.readd and node in node_list:
4878 raise errors.OpPrereqError("Node %s is already in the configuration" %
4879 node, errors.ECODE_EXISTS)
4880 elif self.op.readd and node not in node_list:
4881 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4884 self.changed_primary_ip = False
4886 for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
4887 if self.op.readd and node == existing_node_name:
4888 if existing_node.secondary_ip != secondary_ip:
4889 raise errors.OpPrereqError("Readded node doesn't have the same IP"
4890 " address configuration as before",
4892 if existing_node.primary_ip != primary_ip:
4893 self.changed_primary_ip = True
4897 if (existing_node.primary_ip == primary_ip or
4898 existing_node.secondary_ip == primary_ip or
4899 existing_node.primary_ip == secondary_ip or
4900 existing_node.secondary_ip == secondary_ip):
4901 raise errors.OpPrereqError("New node ip address(es) conflict with"
4902 " existing node %s" % existing_node.name,
4903 errors.ECODE_NOTUNIQUE)
4905 # After this 'if' block, None is no longer a valid value for the
4906 # _capable op attributes
4908 old_node = self.cfg.GetNodeInfo(node)
4909 assert old_node is not None, "Can't retrieve locked node %s" % node
4910 for attr in self._NFLAGS:
4911 if getattr(self.op, attr) is None:
4912 setattr(self.op, attr, getattr(old_node, attr))
4914 for attr in self._NFLAGS:
4915 if getattr(self.op, attr) is None:
4916 setattr(self.op, attr, True)
4918 if self.op.readd and not self.op.vm_capable:
4919 pri, sec = cfg.GetNodeInstances(node)
4921 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
4922 " flag set to false, but it already holds"
4923 " instances" % node,
4926 # check that the type of the node (single versus dual homed) is the
4927 # same as for the master
4928 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
4929 master_singlehomed = myself.secondary_ip == myself.primary_ip
4930 newbie_singlehomed = secondary_ip == primary_ip
4931 if master_singlehomed != newbie_singlehomed:
4932 if master_singlehomed:
4933 raise errors.OpPrereqError("The master has no secondary ip but the"
4934 " new node has one",
4937 raise errors.OpPrereqError("The master has a secondary ip but the"
4938 " new node doesn't have one",
4941 # checks reachability
4942 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
4943 raise errors.OpPrereqError("Node not reachable by ping",
4944 errors.ECODE_ENVIRON)
4946 if not newbie_singlehomed:
4947 # check reachability from my secondary ip to newbie's secondary ip
4948 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
4949 source=myself.secondary_ip):
4950 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4951 " based ping to node daemon port",
4952 errors.ECODE_ENVIRON)
4959 if self.op.master_capable:
4960 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
4962 self.master_candidate = False
4965 self.new_node = old_node
4967 node_group = cfg.LookupNodeGroup(self.op.group)
4968 self.new_node = objects.Node(name=node,
4969 primary_ip=primary_ip,
4970 secondary_ip=secondary_ip,
4971 master_candidate=self.master_candidate,
4972 offline=False, drained=False,
4975 if self.op.ndparams:
4976 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4978 def Exec(self, feedback_fn):
4979 """Adds the new node to the cluster.
4982 new_node = self.new_node
4983 node = new_node.name
4985 # We are adding a new node, so we assume it's powered
4986 new_node.powered = True
4988 # for re-adds, reset the offline/drained/master-candidate flags;
4989 # we need to reset here, otherwise offline would prevent RPC calls
4990 # later in the procedure; this also means that if the re-add
4991 # fails, we are left with a non-offlined, broken node
4993 new_node.drained = new_node.offline = False # pylint: disable=W0201
4994 self.LogInfo("Readding a node, the offline/drained flags were reset")
4995 # if we demote the node, we do cleanup later in the procedure
4996 new_node.master_candidate = self.master_candidate
4997 if self.changed_primary_ip:
4998 new_node.primary_ip = self.op.primary_ip
5000 # copy the master/vm_capable flags
5001 for attr in self._NFLAGS:
5002 setattr(new_node, attr, getattr(self.op, attr))
5004 # notify the user about any possible mc promotion
5005 if new_node.master_candidate:
5006 self.LogInfo("Node will be a master candidate")
5008 if self.op.ndparams:
5009 new_node.ndparams = self.op.ndparams
5011 new_node.ndparams = {}
5013 # check connectivity
5014 result = self.rpc.call_version([node])[node]
5015 result.Raise("Can't get version information from node %s" % node)
5016 if constants.PROTOCOL_VERSION == result.payload:
5017 logging.info("Communication to node %s fine, sw version %s match",
5018 node, result.payload)
5020 raise errors.OpExecError("Version mismatch master version %s,"
5021 " node version %s" %
5022 (constants.PROTOCOL_VERSION, result.payload))
5024 # Add node to our /etc/hosts, and add key to known_hosts
5025 if self.cfg.GetClusterInfo().modify_etc_hosts:
5026 master_node = self.cfg.GetMasterNode()
5027 result = self.rpc.call_etc_hosts_modify(master_node,
5028 constants.ETC_HOSTS_ADD,
5031 result.Raise("Can't update hosts file with new host data")
5033 if new_node.secondary_ip != new_node.primary_ip:
5034 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
5037 node_verify_list = [self.cfg.GetMasterNode()]
5038 node_verify_param = {
5039 constants.NV_NODELIST: [node],
5040 # TODO: do a node-net-test as well?
5043 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5044 self.cfg.GetClusterName())
5045 for verifier in node_verify_list:
5046 result[verifier].Raise("Cannot communicate with node %s" % verifier)
5047 nl_payload = result[verifier].payload[constants.NV_NODELIST]
5049 for failed in nl_payload:
5050 feedback_fn("ssh/hostname verification failed"
5051 " (checking from %s): %s" %
5052 (verifier, nl_payload[failed]))
5053 raise errors.OpExecError("ssh/hostname verification failed")
5056 _RedistributeAncillaryFiles(self)
5057 self.context.ReaddNode(new_node)
5058 # make sure we redistribute the config
5059 self.cfg.Update(new_node, feedback_fn)
5060 # and make sure the new node will not have old files around
5061 if not new_node.master_candidate:
5062 result = self.rpc.call_node_demote_from_mc(new_node.name)
5063 msg = result.fail_msg
5065 self.LogWarning("Node failed to demote itself from master"
5066 " candidate status: %s" % msg)
5068 _RedistributeAncillaryFiles(self, additional_nodes=[node],
5069 additional_vm=self.op.vm_capable)
5070 self.context.AddNode(new_node, self.proc.GetECId())
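# Illustrative sketch, not upstream code: LUNodeAdd.Exec above requires the
# new node to speak exactly the master's protocol version and aborts the add
# otherwise. The same check, isolated into a hypothetical helper:
def _ExampleCheckProtocolVersion(remote_version):
  """Raises OpExecError unless remote_version matches our own (sketch).

  """
  if remote_version != constants.PROTOCOL_VERSION:
    raise errors.OpExecError("Version mismatch master version %s,"
                             " node version %s" %
                             (constants.PROTOCOL_VERSION, remote_version))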
5073 class LUNodeSetParams(LogicalUnit):
5074 """Modifies the parameters of a node.
5076 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5077 to the node role (as _ROLE_*)
5078 @cvar _R2F: a dictionary from node role to tuples of flags
5079 @cvar _FLAGS: a list of attribute names corresponding to the flags
5082 HPATH = "node-modify"
5083 HTYPE = constants.HTYPE_NODE
5085 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5087 (True, False, False): _ROLE_CANDIDATE,
5088 (False, True, False): _ROLE_DRAINED,
5089 (False, False, True): _ROLE_OFFLINE,
5090 (False, False, False): _ROLE_REGULAR,
5092 _R2F = dict((v, k) for k, v in _F2R.items())
5093 _FLAGS = ["master_candidate", "drained", "offline"]
5095 def CheckArguments(self):
5096 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5097 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5098 self.op.master_capable, self.op.vm_capable,
5099 self.op.secondary_ip, self.op.ndparams]
5100 if all_mods.count(None) == len(all_mods):
5101 raise errors.OpPrereqError("Please pass at least one modification",
5103 if all_mods.count(True) > 1:
5104 raise errors.OpPrereqError("Can't set the node into more than one"
5105 " state at the same time",
5108 # Boolean value that tells us whether we might be demoting from MC
5109 self.might_demote = (self.op.master_candidate == False or
5110 self.op.offline == True or
5111 self.op.drained == True or
5112 self.op.master_capable == False)
5114 if self.op.secondary_ip:
5115 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5116 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5117 " address" % self.op.secondary_ip,
5120 self.lock_all = self.op.auto_promote and self.might_demote
5121 self.lock_instances = self.op.secondary_ip is not None
5123 def ExpandNames(self):
5125 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
5127 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5129 if self.lock_instances:
5130 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5132 def DeclareLocks(self, level):
5133 # If we have locked all instances, before waiting to lock nodes, release
5134 # all the ones living on nodes unrelated to the current operation.
5135 if level == locking.LEVEL_NODE and self.lock_instances:
5136 self.affected_instances = []
5137 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5140 # Build list of instances to release
5141 locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
5142 for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
5143 if (instance.disk_template in constants.DTS_INT_MIRROR and
5144 self.op.node_name in instance.all_nodes):
5145 instances_keep.append(instance_name)
5146 self.affected_instances.append(instance)
5148 _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
5150 assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
5151 set(instances_keep))
5153 def BuildHooksEnv(self):
5156 This runs on the master node.
5160 "OP_TARGET": self.op.node_name,
5161 "MASTER_CANDIDATE": str(self.op.master_candidate),
5162 "OFFLINE": str(self.op.offline),
5163 "DRAINED": str(self.op.drained),
5164 "MASTER_CAPABLE": str(self.op.master_capable),
5165 "VM_CAPABLE": str(self.op.vm_capable),
5168 def BuildHooksNodes(self):
5169 """Build hooks nodes.
5172 nl = [self.cfg.GetMasterNode(), self.op.node_name]
5175 def CheckPrereq(self):
5176 """Check prerequisites.
5178 This only checks the instance list against the existing names.
5181 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5183 if (self.op.master_candidate is not None or
5184 self.op.drained is not None or
5185 self.op.offline is not None):
5186 # we can't change the master's node flags
5187 if self.op.node_name == self.cfg.GetMasterNode():
5188 raise errors.OpPrereqError("The master role can be changed"
5189 " only via master-failover",
5192 if self.op.master_candidate and not node.master_capable:
5193 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5194 " it a master candidate" % node.name,
5197 if self.op.vm_capable == False:
5198 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5200 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
5201 " the vm_capable flag" % node.name,
5204 if node.master_candidate and self.might_demote and not self.lock_all:
5205 assert not self.op.auto_promote, "auto_promote set but lock_all not"
5206 # check if after removing the current node, we're missing master
5208 (mc_remaining, mc_should, _) = \
5209 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5210 if mc_remaining < mc_should:
5211 raise errors.OpPrereqError("Not enough master candidates, please"
5212 " pass auto promote option to allow"
5213 " promotion", errors.ECODE_STATE)
5215 self.old_flags = old_flags = (node.master_candidate,
5216 node.drained, node.offline)
5217 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5218 self.old_role = old_role = self._F2R[old_flags]
5220 # Check for ineffective changes
5221 for attr in self._FLAGS:
5222 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5223 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5224 setattr(self.op, attr, None)
5226 # Past this point, any flag change to False means a transition
5227 # away from the respective state, as only real changes are kept
5229 # TODO: We might query the real power state if it supports OOB
5230 if _SupportsOob(self.cfg, node):
5231 if self.op.offline is False and not (node.powered or
5232 self.op.powered == True):
5233 raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5234 " offline status can be reset") %
5236 elif self.op.powered is not None:
5237 raise errors.OpPrereqError(("Unable to change powered state for node %s"
5238 " as it does not support out-of-band"
5239 " handling") % self.op.node_name)
5241 # If we're being deofflined/drained, we'll MC ourself if needed
5242 if (self.op.drained == False or self.op.offline == False or
5243 (self.op.master_capable and not node.master_capable)):
5244 if _DecideSelfPromotion(self):
5245 self.op.master_candidate = True
5246 self.LogInfo("Auto-promoting node to master candidate")
5248 # If we're no longer master capable, we'll demote ourselves from MC
5249 if self.op.master_capable == False and node.master_candidate:
5250 self.LogInfo("Demoting from master candidate")
5251 self.op.master_candidate = False
5254 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
5255 if self.op.master_candidate:
5256 new_role = self._ROLE_CANDIDATE
5257 elif self.op.drained:
5258 new_role = self._ROLE_DRAINED
5259 elif self.op.offline:
5260 new_role = self._ROLE_OFFLINE
5261 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
5262 # False is still in new flags, which means we're un-setting (the
5264 new_role = self._ROLE_REGULAR
5265 else: # no new flags, nothing, keep old role
5268 self.new_role = new_role
5270 if old_role == self._ROLE_OFFLINE and new_role != old_role:
5271 # Trying to transition out of offline status
5272 result = self.rpc.call_version([node.name])[node.name]
5274 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
5275 " to report its version: %s" %
5276 (node.name, result.fail_msg),
5279 self.LogWarning("Transitioning node from offline to online state"
5280 " without using re-add. Please make sure the node"
5283 if self.op.secondary_ip:
5284 # Ok even without locking, because this can't be changed by any LU
5285 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
5286 master_singlehomed = master.secondary_ip == master.primary_ip
5287 if master_singlehomed and self.op.secondary_ip:
5288 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
5289 " homed cluster", errors.ECODE_INVAL)
5292 if self.affected_instances:
5293 raise errors.OpPrereqError("Cannot change secondary ip: offline"
5294 " node has instances (%s) configured"
5295 " to use it" % self.affected_instances)
5297 # On online nodes, check that no instances are running, and that
5298 # the node has the new ip and we can reach it.
5299 for instance in self.affected_instances:
5300 _CheckInstanceDown(self, instance, "cannot change secondary ip")
5302 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
5303 if master.name != node.name:
5304 # check reachability from master secondary ip to new secondary ip
5305 if not netutils.TcpPing(self.op.secondary_ip,
5306 constants.DEFAULT_NODED_PORT,
5307 source=master.secondary_ip):
5308 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5309 " based ping to node daemon port",
5310 errors.ECODE_ENVIRON)
5312 if self.op.ndparams:
5313 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
5314 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
5315 self.new_ndparams = new_ndparams
5317 def Exec(self, feedback_fn):
5322 old_role = self.old_role
5323 new_role = self.new_role
5327 if self.op.ndparams:
5328 node.ndparams = self.new_ndparams
5330 if self.op.powered is not None:
5331 node.powered = self.op.powered
5333 for attr in ["master_capable", "vm_capable"]:
5334 val = getattr(self.op, attr)
5336 setattr(node, attr, val)
5337 result.append((attr, str(val)))
5339 if new_role != old_role:
5340 # Tell the node to demote itself, if no longer MC and not offline
5341 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
5342 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
5344 self.LogWarning("Node failed to demote itself: %s", msg)
5346 new_flags = self._R2F[new_role]
5347 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
5349 result.append((desc, str(nf)))
5350 (node.master_candidate, node.drained, node.offline) = new_flags
5352 # we locked all nodes, we adjust the CP before updating this node
5354 _AdjustCandidatePool(self, [node.name])
5356 if self.op.secondary_ip:
5357 node.secondary_ip = self.op.secondary_ip
5358 result.append(("secondary_ip", self.op.secondary_ip))
5360 # this will trigger configuration file update, if needed
5361 self.cfg.Update(node, feedback_fn)
5363 # this will trigger job queue propagation or cleanup if the mc
5365 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
5366 self.context.ReaddNode(node)
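# Illustrative sketch, not upstream code: the flag/role translation used by
# LUNodeSetParams above is a simple bidirectional table over the
# (master_candidate, drained, offline) tuple. The hypothetical helper below
# shows the round trip for the "drained" role.
def _ExampleDrainedRoleRoundTrip():
  """Returns True if mapping drained flags to a role and back is stable.

  """
  flags = (False, True, False)
  role = LUNodeSetParams._F2R[flags] # pylint: disable=W0212
  return LUNodeSetParams._R2F[role] == flags # pylint: disable=W0212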
5371 class LUNodePowercycle(NoHooksLU):
5372 """Powercycles a node.
5377 def CheckArguments(self):
5378 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5379 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
5380 raise errors.OpPrereqError("The node is the master and the force"
5381 " parameter was not set",
5384 def ExpandNames(self):
5385 """Locking for PowercycleNode.
5387 This is a last-resort option and shouldn't block on other
5388 jobs. Therefore, we grab no locks.
5391 self.needed_locks = {}
5393 def Exec(self, feedback_fn):
5397 result = self.rpc.call_node_powercycle(self.op.node_name,
5398 self.cfg.GetHypervisorType())
5399 result.Raise("Failed to schedule the reboot")
5400 return result.payload
5403 class LUClusterQuery(NoHooksLU):
5404 """Query cluster configuration.
5409 def ExpandNames(self):
5410 self.needed_locks = {}
5412 def Exec(self, feedback_fn):
5413 """Return cluster config.
5416 cluster = self.cfg.GetClusterInfo()
5419 # Filter just for enabled hypervisors
5420 for os_name, hv_dict in cluster.os_hvp.items():
5421 os_hvp[os_name] = {}
5422 for hv_name, hv_params in hv_dict.items():
5423 if hv_name in cluster.enabled_hypervisors:
5424 os_hvp[os_name][hv_name] = hv_params
5426 # Convert ip_family to ip_version
5427 primary_ip_version = constants.IP4_VERSION
5428 if cluster.primary_ip_family == netutils.IP6Address.family:
5429 primary_ip_version = constants.IP6_VERSION
5432 "software_version": constants.RELEASE_VERSION,
5433 "protocol_version": constants.PROTOCOL_VERSION,
5434 "config_version": constants.CONFIG_VERSION,
5435 "os_api_version": max(constants.OS_API_VERSIONS),
5436 "export_version": constants.EXPORT_VERSION,
5437 "architecture": (platform.architecture()[0], platform.machine()),
5438 "name": cluster.cluster_name,
5439 "master": cluster.master_node,
5440 "default_hypervisor": cluster.enabled_hypervisors[0],
5441 "enabled_hypervisors": cluster.enabled_hypervisors,
5442 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
5443 for hypervisor_name in cluster.enabled_hypervisors]),
5445 "beparams": cluster.beparams,
5446 "osparams": cluster.osparams,
5447 "nicparams": cluster.nicparams,
5448 "ndparams": cluster.ndparams,
5449 "candidate_pool_size": cluster.candidate_pool_size,
5450 "master_netdev": cluster.master_netdev,
5451 "volume_group_name": cluster.volume_group_name,
5452 "drbd_usermode_helper": cluster.drbd_usermode_helper,
5453 "file_storage_dir": cluster.file_storage_dir,
5454 "shared_file_storage_dir": cluster.shared_file_storage_dir,
5455 "maintain_node_health": cluster.maintain_node_health,
5456 "ctime": cluster.ctime,
5457 "mtime": cluster.mtime,
5458 "uuid": cluster.uuid,
5459 "tags": list(cluster.GetTags()),
5460 "uid_pool": cluster.uid_pool,
5461 "default_iallocator": cluster.default_iallocator,
5462 "reserved_lvs": cluster.reserved_lvs,
5463 "primary_ip_version": primary_ip_version,
5464 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
5465 "hidden_os": cluster.hidden_os,
5466 "blacklisted_os": cluster.blacklisted_os,
5472 class LUClusterConfigQuery(NoHooksLU):
5473 """Return configuration values.
5477 _FIELDS_DYNAMIC = utils.FieldSet()
5478 _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
5479 "watcher_pause", "volume_group_name")
5481 def CheckArguments(self):
5482 _CheckOutputFields(static=self._FIELDS_STATIC,
5483 dynamic=self._FIELDS_DYNAMIC,
5484 selected=self.op.output_fields)
5486 def ExpandNames(self):
5487 self.needed_locks = {}
5489 def Exec(self, feedback_fn):
5490 """Dump a representation of the cluster config to the standard output.
5494 for field in self.op.output_fields:
5495 if field == "cluster_name":
5496 entry = self.cfg.GetClusterName()
5497 elif field == "master_node":
5498 entry = self.cfg.GetMasterNode()
5499 elif field == "drain_flag":
5500 entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
5501 elif field == "watcher_pause":
5502 entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
5503 elif field == "volume_group_name":
5504 entry = self.cfg.GetVGName()
5506 raise errors.ParameterError(field)
5507 values.append(entry)
5511 class LUInstanceActivateDisks(NoHooksLU):
5512 """Bring up an instance's disks.
5517 def ExpandNames(self):
5518 self._ExpandAndLockInstance()
5519 self.needed_locks[locking.LEVEL_NODE] = []
5520 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5522 def DeclareLocks(self, level):
5523 if level == locking.LEVEL_NODE:
5524 self._LockInstancesNodes()
5526 def CheckPrereq(self):
5527 """Check prerequisites.
5529 This checks that the instance is in the cluster.
5532 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5533 assert self.instance is not None, \
5534 "Cannot retrieve locked instance %s" % self.op.instance_name
5535 _CheckNodeOnline(self, self.instance.primary_node)
5537 def Exec(self, feedback_fn):
5538 """Activate the disks.
5541 disks_ok, disks_info = \
5542 _AssembleInstanceDisks(self, self.instance,
5543 ignore_size=self.op.ignore_size)
5545 raise errors.OpExecError("Cannot activate block devices")
5550 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
5552 """Prepare the block devices for an instance.
5554 This sets up the block devices on all nodes.
5556 @type lu: L{LogicalUnit}
5557 @param lu: the logical unit on whose behalf we execute
5558 @type instance: L{objects.Instance}
5559 @param instance: the instance for whose disks we assemble
5560 @type disks: list of L{objects.Disk} or None
5561 @param disks: which disks to assemble (or all, if None)
5562 @type ignore_secondaries: boolean
5563 @param ignore_secondaries: if true, errors on secondary nodes
5564 won't result in an error return from the function
5565 @type ignore_size: boolean
5566 @param ignore_size: if true, the current known size of the disk
5567 will not be used during the disk activation, useful for cases
5568 when the size is wrong
5569 @return: False if the operation failed, otherwise a list of
5570 (host, instance_visible_name, node_visible_name)
5571 with the mapping from node devices to instance devices
5576 iname = instance.name
5577 disks = _ExpandCheckDisks(instance, disks)
5579 # With the two-pass mechanism we try to reduce the window of
5580 # opportunity for the race condition of switching DRBD to primary
5581 # before handshaking has occurred, but we do not eliminate it
5583 # The proper fix would be to wait (with some limits) until the
5584 # connection has been made and drbd transitions from WFConnection
5585 # into any other network-connected state (Connected, SyncTarget,
5588 # 1st pass, assemble on all nodes in secondary mode
5589 for idx, inst_disk in enumerate(disks):
5590 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
5592 node_disk = node_disk.Copy()
5593 node_disk.UnsetSize()
5594 lu.cfg.SetDiskID(node_disk, node)
5595 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
5596 msg = result.fail_msg
5598 lu.proc.LogWarning("Could not prepare block device %s on node %s"
5599 " (is_primary=False, pass=1): %s",
5600 inst_disk.iv_name, node, msg)
5601 if not ignore_secondaries:
5604 # FIXME: race condition on drbd migration to primary
5606 # 2nd pass, do only the primary node
5607 for idx, inst_disk in enumerate(disks):
5610 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
5611 if node != instance.primary_node:
5614 node_disk = node_disk.Copy()
5615 node_disk.UnsetSize()
5616 lu.cfg.SetDiskID(node_disk, node)
5617 result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
5618 msg = result.fail_msg
5620 lu.proc.LogWarning("Could not prepare block device %s on node %s"
5621 " (is_primary=True, pass=2): %s",
5622 inst_disk.iv_name, node, msg)
5625 dev_path = result.payload
5627 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
5629 # leave the disks configured for the primary node
5630 # this is a workaround that would be fixed better by
5631 # improving the logical/physical id handling
5633 lu.cfg.SetDiskID(disk, instance.primary_node)
5635 return disks_ok, device_info
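# Illustrative sketch, not upstream code: the essence of the two-pass scheme
# used by _AssembleInstanceDisks above is that every node first assembles the
# device in secondary mode, and only afterwards the primary node switches it
# to primary mode, narrowing the window in which DRBD could be promoted
# before its peers have connected. assemble_fn(node, as_primary) stands in
# for the per-node RPC call.
def _ExampleTwoPassAssemble(nodes, primary_node, assemble_fn):
  """Runs assemble_fn over all nodes in two passes (sketch).

  """
  for node in nodes:
    assemble_fn(node, False) # pass 1: everyone in secondary mode
  assemble_fn(primary_node, True) # pass 2: only the primary switches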
5638 def _StartInstanceDisks(lu, instance, force):
5639 """Start the disks of an instance.
5642 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
5643 ignore_secondaries=force)
5645 _ShutdownInstanceDisks(lu, instance)
5646 if force is not None and not force:
5647 lu.proc.LogWarning("", hint="If the message above refers to a"
5649 " you can retry the operation using '--force'.")
5650 raise errors.OpExecError("Disk consistency error")
5653 class LUInstanceDeactivateDisks(NoHooksLU):
5654 """Shutdown an instance's disks.
5659 def ExpandNames(self):
5660 self._ExpandAndLockInstance()
5661 self.needed_locks[locking.LEVEL_NODE] = []
5662 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5664 def DeclareLocks(self, level):
5665 if level == locking.LEVEL_NODE:
5666 self._LockInstancesNodes()
5668 def CheckPrereq(self):
5669 """Check prerequisites.
5671 This checks that the instance is in the cluster.
5674 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5675 assert self.instance is not None, \
5676 "Cannot retrieve locked instance %s" % self.op.instance_name
5678 def Exec(self, feedback_fn):
5679 """Deactivate the disks
5682 instance = self.instance
5684 _ShutdownInstanceDisks(self, instance)
5686 _SafeShutdownInstanceDisks(self, instance)
5689 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
5690 """Shutdown block devices of an instance.
5692 This function checks that the instance is not running before calling
5693 _ShutdownInstanceDisks.
5696 _CheckInstanceDown(lu, instance, "cannot shutdown disks")
5697 _ShutdownInstanceDisks(lu, instance, disks=disks)
5700 def _ExpandCheckDisks(instance, disks):
5701 """Return the instance disks selected by the disks list
5703 @type disks: list of L{objects.Disk} or None
5704 @param disks: selected disks
5705 @rtype: list of L{objects.Disk}
5706 @return: selected instance disks to act on
5710 return instance.disks
5712 if not set(disks).issubset(instance.disks):
5713 raise errors.ProgrammerError("Can only act on disks belonging to the"
5718 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
5719 """Shutdown block devices of an instance.
5721 This does the shutdown on all nodes of the instance.
5723 If ignore_primary is false, errors on the primary node cause the shutdown to be reported as failed.
5728 disks = _ExpandCheckDisks(instance, disks)
5731 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
5732 lu.cfg.SetDiskID(top_disk, node)
5733 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
5734 msg = result.fail_msg
5736 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
5737 disk.iv_name, node, msg)
5738 if ((node == instance.primary_node and not ignore_primary) or
5739 (node != instance.primary_node and not result.offline)):
5744 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
5745 """Checks if a node has enough free memory.
5747 This function checks if a given node has the needed amount of free
5748 memory. In case the node has less memory or we cannot get the
5749 information from the node, this function raises an OpPrereqError
5752 @type lu: C{LogicalUnit}
5753 @param lu: a logical unit from which we get configuration data
5755 @param node: the node to check
5756 @type reason: C{str}
5757 @param reason: string to use in the error message
5758 @type requested: C{int}
5759 @param requested: the amount of memory in MiB to check for
5760 @type hypervisor_name: C{str}
5761 @param hypervisor_name: the hypervisor to ask for memory stats
5762 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
5763 we cannot check the node
5766 nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
5767 nodeinfo[node].Raise("Can't get data from node %s" % node,
5768 prereq=True, ecode=errors.ECODE_ENVIRON)
5769 free_mem = nodeinfo[node].payload.get("memory_free", None)
5770 if not isinstance(free_mem, int):
5771 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
5772 " was '%s'" % (node, free_mem),
5773 errors.ECODE_ENVIRON)
5774 if requested > free_mem:
5775 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
5776 " needed %s MiB, available %s MiB" %
5777 (node, reason, requested, free_mem),
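# --- Illustrative sketch (not part of the original module) ----------------
# How a prereq check typically invokes _CheckNodeFreeMemory, mirroring
# LUInstanceStartup.CheckPrereq further down.  The wrapper name and its
# arguments are examples only.
def _ExampleMemoryPrereq(lu, instance, be_params):
  # raises OpPrereqError if the primary node cannot provide the memory
  _CheckNodeFreeMemory(lu, instance.primary_node,
                       "starting instance %s" % instance.name,
                       be_params[constants.BE_MEMORY], instance.hypervisor)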
5781 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
5782 Checks if nodes have enough free disk space in all VGs.
5784 This function checks if all given nodes have the needed amount of
5785 free disk. In case any node has less disk or we cannot get the
5786 information from the node, this function raises an OpPrereqError
5789 @type lu: C{LogicalUnit}
5790 @param lu: a logical unit from which we get configuration data
5791 @type nodenames: C{list}
5792 @param nodenames: the list of node names to check
5793 @type req_sizes: C{dict}
5794 @param req_sizes: the hash of vg and corresponding amount of disk in
5796 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5797 or we cannot check the node
5800 for vg, req_size in req_sizes.items():
5801 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
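# --- Illustrative sketch (not part of the original module) ----------------
# req_sizes maps a volume group name to the amount of disk (in MiB) needed
# on it, so a check covering two VGs would look roughly like this (the VG
# and node names are made up):
#
#   req_sizes = {"xenvg": 10240, "data": 2048}
#   _CheckNodesFreeDiskPerVG(lu, ["node1.example.com", "node2.example.com"],
#                            req_sizes)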
5804 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5805 """Checks if nodes have enough free disk space in the specified VG.
5807 This function checks if all given nodes have the needed amount of
5808 free disk. In case any node has less disk or we cannot get the
5809 information from the node, this function raises an OpPrereqError
5812 @type lu: C{LogicalUnit}
5813 @param lu: a logical unit from which we get configuration data
5814 @type nodenames: C{list}
5815 @param nodenames: the list of node names to check
5817 @param vg: the volume group to check
5818 @type requested: C{int}
5819 @param requested: the amount of disk in MiB to check for
5820 @raise errors.OpPrereqError: if the node doesn't have enough disk,
5821 or we cannot check the node
5824 nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5825 for node in nodenames:
5826 info = nodeinfo[node]
5827 info.Raise("Cannot get current information from node %s" % node,
5828 prereq=True, ecode=errors.ECODE_ENVIRON)
5829 vg_free = info.payload.get("vg_free", None)
5830 if not isinstance(vg_free, int):
5831 raise errors.OpPrereqError("Can't compute free disk space on node"
5832 " %s for vg %s, result was '%s'" %
5833 (node, vg, vg_free), errors.ECODE_ENVIRON)
5834 if requested > vg_free:
5835 raise errors.OpPrereqError("Not enough disk space on target node %s"
5836 " vg %s: required %d MiB, available %d MiB" %
5837 (node, vg, requested, vg_free),
5841 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
5842 """Checks if nodes have enough physical CPUs
5844 This function checks if all given nodes have the needed number of
5845 physical CPUs. In case any node has less CPUs or we cannot get the
5846 information from the node, this function raises an OpPrereqError
5849 @type lu: C{LogicalUnit}
5850 @param lu: a logical unit from which we get configuration data
5851 @type nodenames: C{list}
5852 @param nodenames: the list of node names to check
5853 @type requested: C{int}
5854 @param requested: the minimum acceptable number of physical CPUs
5855 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
5856 or we cannot check the node
5859 nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_name)
5860 for node in nodenames:
5861 info = nodeinfo[node]
5862 info.Raise("Cannot get current information from node %s" % node,
5863 prereq=True, ecode=errors.ECODE_ENVIRON)
5864 num_cpus = info.payload.get("cpu_total", None)
5865 if not isinstance(num_cpus, int):
5866 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
5867 " on node %s, result was '%s'" %
5868 (node, num_cpus), errors.ECODE_ENVIRON)
5869 if requested > num_cpus:
5870 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
5871 "required" % (node, num_cpus, requested),
5875 class LUInstanceStartup(LogicalUnit):
5876 """Starts an instance.
5879 HPATH = "instance-start"
5880 HTYPE = constants.HTYPE_INSTANCE
5883 def CheckArguments(self):
5885 if self.op.beparams:
5886 # fill the beparams dict
5887 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5889 def ExpandNames(self):
5890 self._ExpandAndLockInstance()
5892 def BuildHooksEnv(self):
5895 This runs on master, primary and secondary nodes of the instance.
5899 "FORCE": self.op.force,
5902 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5906 def BuildHooksNodes(self):
5907 """Build hooks nodes.
5910 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5913 def CheckPrereq(self):
5914 """Check prerequisites.
5916 This checks that the instance is in the cluster.
5919 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5920 assert self.instance is not None, \
5921 "Cannot retrieve locked instance %s" % self.op.instance_name
5924 if self.op.hvparams:
5925 # check hypervisor parameter syntax (locally)
5926 cluster = self.cfg.GetClusterInfo()
5927 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5928 filled_hvp = cluster.FillHV(instance)
5929 filled_hvp.update(self.op.hvparams)
5930 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5931 hv_type.CheckParameterSyntax(filled_hvp)
5932 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
5934 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5936 if self.primary_offline and self.op.ignore_offline_nodes:
5937 self.proc.LogWarning("Ignoring offline primary node")
5939 if self.op.hvparams or self.op.beparams:
5940 self.proc.LogWarning("Overridden parameters are ignored")
5942 _CheckNodeOnline(self, instance.primary_node)
5944 bep = self.cfg.GetClusterInfo().FillBE(instance)
5946 # check bridges existence
5947 _CheckInstanceBridgesExist(self, instance)
5949 remote_info = self.rpc.call_instance_info(instance.primary_node,
5951 instance.hypervisor)
5952 remote_info.Raise("Error checking node %s" % instance.primary_node,
5953 prereq=True, ecode=errors.ECODE_ENVIRON)
5954 if not remote_info.payload: # not running already
5955 _CheckNodeFreeMemory(self, instance.primary_node,
5956 "starting instance %s" % instance.name,
5957 bep[constants.BE_MEMORY], instance.hypervisor)
5959 def Exec(self, feedback_fn):
5960 """Start the instance.
5963 instance = self.instance
5964 force = self.op.force
5966 if not self.op.no_remember:
5967 self.cfg.MarkInstanceUp(instance.name)
5969 if self.primary_offline:
5970 assert self.op.ignore_offline_nodes
5971 self.proc.LogInfo("Primary node offline, marked instance as started")
5973 node_current = instance.primary_node
5975 _StartInstanceDisks(self, instance, force)
5977 result = self.rpc.call_instance_start(node_current, instance,
5978 self.op.hvparams, self.op.beparams,
5979 self.op.startup_paused)
5980 msg = result.fail_msg
5982 _ShutdownInstanceDisks(self, instance)
5983 raise errors.OpExecError("Could not start instance: %s" % msg)
5986 class LUInstanceReboot(LogicalUnit):
5987 """Reboot an instance.
5990 HPATH = "instance-reboot"
5991 HTYPE = constants.HTYPE_INSTANCE
5994 def ExpandNames(self):
5995 self._ExpandAndLockInstance()
5997 def BuildHooksEnv(self):
6000 This runs on master, primary and secondary nodes of the instance.
6004 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
6005 "REBOOT_TYPE": self.op.reboot_type,
6006 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6009 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6013 def BuildHooksNodes(self):
6014 """Build hooks nodes.
6017 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6020 def CheckPrereq(self):
6021 """Check prerequisites.
6023 This checks that the instance is in the cluster.
6026 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6027 assert self.instance is not None, \
6028 "Cannot retrieve locked instance %s" % self.op.instance_name
6030 _CheckNodeOnline(self, instance.primary_node)
6032 # check bridges existence
6033 _CheckInstanceBridgesExist(self, instance)
6035 def Exec(self, feedback_fn):
6036 """Reboot the instance.
6039 instance = self.instance
6040 ignore_secondaries = self.op.ignore_secondaries
6041 reboot_type = self.op.reboot_type
6043 remote_info = self.rpc.call_instance_info(instance.primary_node,
6045 instance.hypervisor)
6046 remote_info.Raise("Error checking node %s" % instance.primary_node)
6047 instance_running = bool(remote_info.payload)
6049 node_current = instance.primary_node
6051 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
6052 constants.INSTANCE_REBOOT_HARD]:
6053 for disk in instance.disks:
6054 self.cfg.SetDiskID(disk, node_current)
6055 result = self.rpc.call_instance_reboot(node_current, instance,
6057 self.op.shutdown_timeout)
6058 result.Raise("Could not reboot instance")
6060 if instance_running:
6061 result = self.rpc.call_instance_shutdown(node_current, instance,
6062 self.op.shutdown_timeout)
6063 result.Raise("Could not shutdown instance for full reboot")
6064 _ShutdownInstanceDisks(self, instance)
6066 self.LogInfo("Instance %s was already stopped, starting now",
6068 _StartInstanceDisks(self, instance, ignore_secondaries)
6069 result = self.rpc.call_instance_start(node_current, instance,
6071 msg = result.fail_msg
6073 _ShutdownInstanceDisks(self, instance)
6074 raise errors.OpExecError("Could not start instance for"
6075 " full reboot: %s" % msg)
6077 self.cfg.MarkInstanceUp(instance.name)
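# --- Illustrative sketch (not part of the original module) ----------------
# The branching implemented by LUInstanceReboot.Exec above, written out
# compactly: soft/hard reboots of a running instance are delegated to the
# hypervisor via call_instance_reboot, anything else (full reboot, or an
# instance that is not running) becomes a shutdown followed by a fresh disk
# activation and start.  The helper name is hypothetical.
def _ExampleRebootPlan(reboot_type, instance_running):
  if instance_running and reboot_type in (constants.INSTANCE_REBOOT_SOFT,
                                          constants.INSTANCE_REBOOT_HARD):
    return "hypervisor-reboot"   # rpc.call_instance_reboot path
  return "stop-and-start"        # shutdown + _StartInstanceDisks + start path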
6080 class LUInstanceShutdown(LogicalUnit):
6081 """Shutdown an instance.
6084 HPATH = "instance-stop"
6085 HTYPE = constants.HTYPE_INSTANCE
6088 def ExpandNames(self):
6089 self._ExpandAndLockInstance()
6091 def BuildHooksEnv(self):
6094 This runs on master, primary and secondary nodes of the instance.
6097 env = _BuildInstanceHookEnvByObject(self, self.instance)
6098 env["TIMEOUT"] = self.op.timeout
6101 def BuildHooksNodes(self):
6102 """Build hooks nodes.
6105 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6108 def CheckPrereq(self):
6109 """Check prerequisites.
6111 This checks that the instance is in the cluster.
6114 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6115 assert self.instance is not None, \
6116 "Cannot retrieve locked instance %s" % self.op.instance_name
6118 self.primary_offline = \
6119 self.cfg.GetNodeInfo(self.instance.primary_node).offline
6121 if self.primary_offline and self.op.ignore_offline_nodes:
6122 self.proc.LogWarning("Ignoring offline primary node")
6124 _CheckNodeOnline(self, self.instance.primary_node)
6126 def Exec(self, feedback_fn):
6127 """Shutdown the instance.
6130 instance = self.instance
6131 node_current = instance.primary_node
6132 timeout = self.op.timeout
6134 if not self.op.no_remember:
6135 self.cfg.MarkInstanceDown(instance.name)
6137 if self.primary_offline:
6138 assert self.op.ignore_offline_nodes
6139 self.proc.LogInfo("Primary node offline, marked instance as stopped")
6141 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
6142 msg = result.fail_msg
6144 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
6146 _ShutdownInstanceDisks(self, instance)
6149 class LUInstanceReinstall(LogicalUnit):
6150 """Reinstall an instance.
6153 HPATH = "instance-reinstall"
6154 HTYPE = constants.HTYPE_INSTANCE
6157 def ExpandNames(self):
6158 self._ExpandAndLockInstance()
6160 def BuildHooksEnv(self):
6163 This runs on master, primary and secondary nodes of the instance.
6166 return _BuildInstanceHookEnvByObject(self, self.instance)
6168 def BuildHooksNodes(self):
6169 """Build hooks nodes.
6172 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6175 def CheckPrereq(self):
6176 """Check prerequisites.
6178 This checks that the instance is in the cluster and is not running.
6181 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6182 assert instance is not None, \
6183 "Cannot retrieve locked instance %s" % self.op.instance_name
6184 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
6185 " offline, cannot reinstall")
6186 for node in instance.secondary_nodes:
6187 _CheckNodeOnline(self, node, "Instance secondary node offline,"
6188 " cannot reinstall")
6190 if instance.disk_template == constants.DT_DISKLESS:
6191 raise errors.OpPrereqError("Instance '%s' has no disks" %
6192 self.op.instance_name,
6194 _CheckInstanceDown(self, instance, "cannot reinstall")
6196 if self.op.os_type is not None:
6198 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
6199 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
6200 instance_os = self.op.os_type
6202 instance_os = instance.os
6204 nodelist = list(instance.all_nodes)
6206 if self.op.osparams:
6207 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
6208 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
6209 self.os_inst = i_osdict # the new dict (without defaults)
6213 self.instance = instance
6215 def Exec(self, feedback_fn):
6216 """Reinstall the instance.
6219 inst = self.instance
6221 if self.op.os_type is not None:
6222 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
6223 inst.os = self.op.os_type
6224 # Write to configuration
6225 self.cfg.Update(inst, feedback_fn)
6227 _StartInstanceDisks(self, inst, None)
6229 feedback_fn("Running the instance OS create scripts...")
6230 # FIXME: pass debug option from opcode to backend
6231 result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
6232 self.op.debug_level,
6233 osparams=self.os_inst)
6234 result.Raise("Could not install OS for instance %s on node %s" %
6235 (inst.name, inst.primary_node))
6237 _ShutdownInstanceDisks(self, inst)
6240 class LUInstanceRecreateDisks(LogicalUnit):
6241 """Recreate an instance's missing disks.
6244 HPATH = "instance-recreate-disks"
6245 HTYPE = constants.HTYPE_INSTANCE
6248 def CheckArguments(self):
6249 # normalise the disk list
6250 self.op.disks = sorted(frozenset(self.op.disks))
6252 def ExpandNames(self):
6253 self._ExpandAndLockInstance()
6254 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6256 self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
6257 self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
6259 self.needed_locks[locking.LEVEL_NODE] = []
6261 def DeclareLocks(self, level):
6262 if level == locking.LEVEL_NODE:
6263 # if we replace the nodes, we only need to lock the old primary,
6264 # otherwise we need to lock all nodes for disk re-creation
6265 primary_only = bool(self.op.nodes)
6266 self._LockInstancesNodes(primary_only=primary_only)
6268 def BuildHooksEnv(self):
6271 This runs on master, primary and secondary nodes of the instance.
6274 return _BuildInstanceHookEnvByObject(self, self.instance)
6276 def BuildHooksNodes(self):
6277 """Build hooks nodes.
6280 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6283 def CheckPrereq(self):
6284 """Check prerequisites.
6286 This checks that the instance is in the cluster and is not running.
6289 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6290 assert instance is not None, \
6291 "Cannot retrieve locked instance %s" % self.op.instance_name
6293 if len(self.op.nodes) != len(instance.all_nodes):
6294 raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
6295 " %d replacement nodes were specified" %
6296 (instance.name, len(instance.all_nodes),
6297 len(self.op.nodes)),
6299 assert instance.disk_template != constants.DT_DRBD8 or \
6300 len(self.op.nodes) == 2
6301 assert instance.disk_template != constants.DT_PLAIN or \
6302 len(self.op.nodes) == 1
6303 primary_node = self.op.nodes[0]
6305 primary_node = instance.primary_node
6306 _CheckNodeOnline(self, primary_node)
6308 if instance.disk_template == constants.DT_DISKLESS:
6309 raise errors.OpPrereqError("Instance '%s' has no disks" %
6310 self.op.instance_name, errors.ECODE_INVAL)
6311 # if we replace nodes *and* the old primary is offline, we don't
6313 assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
6314 old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
6315 if not (self.op.nodes and old_pnode.offline):
6316 _CheckInstanceDown(self, instance, "cannot recreate disks")
6318 if not self.op.disks:
6319 self.op.disks = range(len(instance.disks))
6321 for idx in self.op.disks:
6322 if idx >= len(instance.disks):
6323 raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
6325 if self.op.disks != range(len(instance.disks)) and self.op.nodes:
6326 raise errors.OpPrereqError("Can't recreate disks partially and"
6327 " change the nodes at the same time",
6329 self.instance = instance
6331 def Exec(self, feedback_fn):
6332 """Recreate the disks.
6335 instance = self.instance
6338 mods = [] # keeps track of needed logical_id changes
6340 for idx, disk in enumerate(instance.disks):
6341 if idx not in self.op.disks: # disk idx has not been passed in
6344 # update secondaries for disks, if needed
6346 if disk.dev_type == constants.LD_DRBD8:
6347 # need to update the nodes and minors
6348 assert len(self.op.nodes) == 2
6349 assert len(disk.logical_id) == 6 # otherwise disk internals
6351 (_, _, old_port, _, _, old_secret) = disk.logical_id
6352 new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
6353 new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
6354 new_minors[0], new_minors[1], old_secret)
6355 assert len(disk.logical_id) == len(new_id)
6356 mods.append((idx, new_id))
6358 # now that we have passed all asserts above, we can apply the mods
6359 # in a single run (to avoid partial changes)
6360 for idx, new_id in mods:
6361 instance.disks[idx].logical_id = new_id
6363 # change primary node, if needed
6365 instance.primary_node = self.op.nodes[0]
6366 self.LogWarning("Changing the instance's nodes, you will have to"
6367 " remove any disks left on the older nodes manually")
6370 self.cfg.Update(instance, feedback_fn)
6372 _CreateDisks(self, instance, to_skip=to_skip)
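# --- Illustrative sketch (not part of the original module) ----------------
# How the DRBD8 logical_id is rebuilt in LUInstanceRecreateDisks.Exec above
# when the instance is moved to new nodes: the port and shared secret are
# kept, while the node pair and the DRBD minors are replaced.  The helper
# name is hypothetical.
def _ExampleNewDrbdLogicalId(cfg, old_logical_id, new_nodes, instance_name):
  (_, _, old_port, _, _, old_secret) = old_logical_id
  new_minors = cfg.AllocateDRBDMinor(new_nodes, instance_name)
  return (new_nodes[0], new_nodes[1], old_port,
          new_minors[0], new_minors[1], old_secret)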
6375 class LUInstanceRename(LogicalUnit):
6376 """Rename an instance.
6379 HPATH = "instance-rename"
6380 HTYPE = constants.HTYPE_INSTANCE
6382 def CheckArguments(self):
6386 if self.op.ip_check and not self.op.name_check:
6387 # TODO: make the ip check more flexible and not depend on the name check
6388 raise errors.OpPrereqError("IP address check requires a name check",
6391 def BuildHooksEnv(self):
6394 This runs on master, primary and secondary nodes of the instance.
6397 env = _BuildInstanceHookEnvByObject(self, self.instance)
6398 env["INSTANCE_NEW_NAME"] = self.op.new_name
6401 def BuildHooksNodes(self):
6402 """Build hooks nodes.
6405 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6408 def CheckPrereq(self):
6409 """Check prerequisites.
6411 This checks that the instance is in the cluster and is not running.
6414 self.op.instance_name = _ExpandInstanceName(self.cfg,
6415 self.op.instance_name)
6416 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6417 assert instance is not None
6418 _CheckNodeOnline(self, instance.primary_node)
6419 _CheckInstanceDown(self, instance, "cannot rename")
6420 self.instance = instance
6422 new_name = self.op.new_name
6423 if self.op.name_check:
6424 hostname = netutils.GetHostname(name=new_name)
6425 if hostname != new_name:
6426 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
6428 if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
6429 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
6430 " same as given hostname '%s'") %
6431 (hostname.name, self.op.new_name),
6433 new_name = self.op.new_name = hostname.name
6434 if (self.op.ip_check and
6435 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
6436 raise errors.OpPrereqError("IP %s of instance %s already in use" %
6437 (hostname.ip, new_name),
6438 errors.ECODE_NOTUNIQUE)
6440 instance_list = self.cfg.GetInstanceList()
6441 if new_name in instance_list and new_name != instance.name:
6442 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6443 new_name, errors.ECODE_EXISTS)
6445 def Exec(self, feedback_fn):
6446 """Rename the instance.
6449 inst = self.instance
6450 old_name = inst.name
6452 rename_file_storage = False
6453 if (inst.disk_template in constants.DTS_FILEBASED and
6454 self.op.new_name != inst.name):
6455 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
6456 rename_file_storage = True
6458 self.cfg.RenameInstance(inst.name, self.op.new_name)
6459 # Change the instance lock. This is definitely safe while we hold the BGL.
6460 # Otherwise the new lock would have to be added in acquired mode.
6462 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
6463 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
6465 # re-read the instance from the configuration after rename
6466 inst = self.cfg.GetInstanceInfo(self.op.new_name)
6468 if rename_file_storage:
6469 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
6470 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
6471 old_file_storage_dir,
6472 new_file_storage_dir)
6473 result.Raise("Could not rename on node %s directory '%s' to '%s'"
6474 " (but the instance has been renamed in Ganeti)" %
6475 (inst.primary_node, old_file_storage_dir,
6476 new_file_storage_dir))
6478 _StartInstanceDisks(self, inst, None)
6480 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
6481 old_name, self.op.debug_level)
6482 msg = result.fail_msg
6484 msg = ("Could not run OS rename script for instance %s on node %s"
6485 " (but the instance has been renamed in Ganeti): %s" %
6486 (inst.name, inst.primary_node, msg))
6487 self.proc.LogWarning(msg)
6489 _ShutdownInstanceDisks(self, inst)
6494 class LUInstanceRemove(LogicalUnit):
6495 """Remove an instance.
6498 HPATH = "instance-remove"
6499 HTYPE = constants.HTYPE_INSTANCE
6502 def ExpandNames(self):
6503 self._ExpandAndLockInstance()
6504 self.needed_locks[locking.LEVEL_NODE] = []
6505 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6507 def DeclareLocks(self, level):
6508 if level == locking.LEVEL_NODE:
6509 self._LockInstancesNodes()
6511 def BuildHooksEnv(self):
6514 This runs on master, primary and secondary nodes of the instance.
6517 env = _BuildInstanceHookEnvByObject(self, self.instance)
6518 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
6521 def BuildHooksNodes(self):
6522 """Build hooks nodes.
6525 nl = [self.cfg.GetMasterNode()]
6526 nl_post = list(self.instance.all_nodes) + nl
6527 return (nl, nl_post)
6529 def CheckPrereq(self):
6530 """Check prerequisites.
6532 This checks that the instance is in the cluster.
6535 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6536 assert self.instance is not None, \
6537 "Cannot retrieve locked instance %s" % self.op.instance_name
6539 def Exec(self, feedback_fn):
6540 """Remove the instance.
6543 instance = self.instance
6544 logging.info("Shutting down instance %s on node %s",
6545 instance.name, instance.primary_node)
6547 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
6548 self.op.shutdown_timeout)
6549 msg = result.fail_msg
6551 if self.op.ignore_failures:
6552 feedback_fn("Warning: can't shutdown instance: %s" % msg)
6554 raise errors.OpExecError("Could not shutdown instance %s on"
6556 (instance.name, instance.primary_node, msg))
6558 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
6561 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
6562 """Utility function to remove an instance.
6565 logging.info("Removing block devices for instance %s", instance.name)
6567 if not _RemoveDisks(lu, instance):
6568 if not ignore_failures:
6569 raise errors.OpExecError("Can't remove instance's disks")
6570 feedback_fn("Warning: can't remove instance's disks")
6572 logging.info("Removing instance %s out of cluster config", instance.name)
6574 lu.cfg.RemoveInstance(instance.name)
6576 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
6577 "Instance lock removal conflict"
6579 # Remove lock for the instance
6580 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
6583 class LUInstanceQuery(NoHooksLU):
6584 """Logical unit for querying instances.
6587 # pylint: disable=W0142
6590 def CheckArguments(self):
6591 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
6592 self.op.output_fields, self.op.use_locking)
6594 def ExpandNames(self):
6595 self.iq.ExpandNames(self)
6597 def DeclareLocks(self, level):
6598 self.iq.DeclareLocks(self, level)
6600 def Exec(self, feedback_fn):
6601 return self.iq.OldStyleQuery(self)
6604 class LUInstanceFailover(LogicalUnit):
6605 """Failover an instance.
6608 HPATH = "instance-failover"
6609 HTYPE = constants.HTYPE_INSTANCE
6612 def CheckArguments(self):
6613 """Check the arguments.
6616 self.iallocator = getattr(self.op, "iallocator", None)
6617 self.target_node = getattr(self.op, "target_node", None)
6619 def ExpandNames(self):
6620 self._ExpandAndLockInstance()
6622 if self.op.target_node is not None:
6623 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6625 self.needed_locks[locking.LEVEL_NODE] = []
6626 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6628 ignore_consistency = self.op.ignore_consistency
6629 shutdown_timeout = self.op.shutdown_timeout
6630 self._migrater = TLMigrateInstance(self, self.op.instance_name,
6633 ignore_consistency=ignore_consistency,
6634 shutdown_timeout=shutdown_timeout)
6635 self.tasklets = [self._migrater]
6637 def DeclareLocks(self, level):
6638 if level == locking.LEVEL_NODE:
6639 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
6640 if instance.disk_template in constants.DTS_EXT_MIRROR:
6641 if self.op.target_node is None:
6642 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6644 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
6645 self.op.target_node]
6646 del self.recalculate_locks[locking.LEVEL_NODE]
6648 self._LockInstancesNodes()
6650 def BuildHooksEnv(self):
6653 This runs on master, primary and secondary nodes of the instance.
6656 instance = self._migrater.instance
6657 source_node = instance.primary_node
6658 target_node = self.op.target_node
6660 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
6661 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6662 "OLD_PRIMARY": source_node,
6663 "NEW_PRIMARY": target_node,
6666 if instance.disk_template in constants.DTS_INT_MIRROR:
6667 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
6668 env["NEW_SECONDARY"] = source_node
6670 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
6672 env.update(_BuildInstanceHookEnvByObject(self, instance))
6676 def BuildHooksNodes(self):
6677 """Build hooks nodes.
6680 instance = self._migrater.instance
6681 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
6682 return (nl, nl + [instance.primary_node])
6685 class LUInstanceMigrate(LogicalUnit):
6686 """Migrate an instance.
6688 This is migration without shutting the instance down, as opposed to
6689 failover, which is done with a shutdown.
6692 HPATH = "instance-migrate"
6693 HTYPE = constants.HTYPE_INSTANCE
6696 def ExpandNames(self):
6697 self._ExpandAndLockInstance()
6699 if self.op.target_node is not None:
6700 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6702 self.needed_locks[locking.LEVEL_NODE] = []
6703 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6705 self._migrater = TLMigrateInstance(self, self.op.instance_name,
6706 cleanup=self.op.cleanup,
6708 fallback=self.op.allow_failover)
6709 self.tasklets = [self._migrater]
6711 def DeclareLocks(self, level):
6712 if level == locking.LEVEL_NODE:
6713 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
6714 if instance.disk_template in constants.DTS_EXT_MIRROR:
6715 if self.op.target_node is None:
6716 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6718 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
6719 self.op.target_node]
6720 del self.recalculate_locks[locking.LEVEL_NODE]
6722 self._LockInstancesNodes()
6724 def BuildHooksEnv(self):
6727 This runs on master, primary and secondary nodes of the instance.
6730 instance = self._migrater.instance
6731 source_node = instance.primary_node
6732 target_node = self.op.target_node
6733 env = _BuildInstanceHookEnvByObject(self, instance)
6735 "MIGRATE_LIVE": self._migrater.live,
6736 "MIGRATE_CLEANUP": self.op.cleanup,
6737 "OLD_PRIMARY": source_node,
6738 "NEW_PRIMARY": target_node,
6741 if instance.disk_template in constants.DTS_INT_MIRROR:
6742 env["OLD_SECONDARY"] = target_node
6743 env["NEW_SECONDARY"] = source_node
6745 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
6749 def BuildHooksNodes(self):
6750 """Build hooks nodes.
6753 instance = self._migrater.instance
6754 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
6755 return (nl, nl + [instance.primary_node])
6758 class LUInstanceMove(LogicalUnit):
6759 """Move an instance by data-copying.
6762 HPATH = "instance-move"
6763 HTYPE = constants.HTYPE_INSTANCE
6766 def ExpandNames(self):
6767 self._ExpandAndLockInstance()
6768 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6769 self.op.target_node = target_node
6770 self.needed_locks[locking.LEVEL_NODE] = [target_node]
6771 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6773 def DeclareLocks(self, level):
6774 if level == locking.LEVEL_NODE:
6775 self._LockInstancesNodes(primary_only=True)
6777 def BuildHooksEnv(self):
6780 This runs on master, primary and secondary nodes of the instance.
6784 "TARGET_NODE": self.op.target_node,
6785 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6787 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6790 def BuildHooksNodes(self):
6791 """Build hooks nodes.
6795 self.cfg.GetMasterNode(),
6796 self.instance.primary_node,
6797 self.op.target_node,
6801 def CheckPrereq(self):
6802 """Check prerequisites.
6804 This checks that the instance is in the cluster.
6807 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6808 assert self.instance is not None, \
6809 "Cannot retrieve locked instance %s" % self.op.instance_name
6811 node = self.cfg.GetNodeInfo(self.op.target_node)
6812 assert node is not None, \
6813 "Cannot retrieve locked node %s" % self.op.target_node
6815 self.target_node = target_node = node.name
6817 if target_node == instance.primary_node:
6818 raise errors.OpPrereqError("Instance %s is already on the node %s" %
6819 (instance.name, target_node),
6822 bep = self.cfg.GetClusterInfo().FillBE(instance)
6824 for idx, dsk in enumerate(instance.disks):
6825 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
6826 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
6827 " cannot copy" % idx, errors.ECODE_STATE)
6829 _CheckNodeOnline(self, target_node)
6830 _CheckNodeNotDrained(self, target_node)
6831 _CheckNodeVmCapable(self, target_node)
6833 if instance.admin_up:
6834 # check memory requirements on the secondary node
6835 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
6836 instance.name, bep[constants.BE_MEMORY],
6837 instance.hypervisor)
6839 self.LogInfo("Not checking memory on the secondary node as"
6840 " instance will not be started")
6842 # check bridge existence
6843 _CheckInstanceBridgesExist(self, instance, node=target_node)
6845 def Exec(self, feedback_fn):
6846 """Move an instance.
6848 The move is done by shutting it down on its present node, copying
6849 the data over (slow) and starting it on the new node.
6852 instance = self.instance
6854 source_node = instance.primary_node
6855 target_node = self.target_node
6857 self.LogInfo("Shutting down instance %s on source node %s",
6858 instance.name, source_node)
6860 result = self.rpc.call_instance_shutdown(source_node, instance,
6861 self.op.shutdown_timeout)
6862 msg = result.fail_msg
6864 if self.op.ignore_consistency:
6865 self.proc.LogWarning("Could not shutdown instance %s on node %s."
6866 " Proceeding anyway. Please make sure node"
6867 " %s is down. Error details: %s",
6868 instance.name, source_node, source_node, msg)
6870 raise errors.OpExecError("Could not shutdown instance %s on"
6872 (instance.name, source_node, msg))
6874 # create the target disks
6876 _CreateDisks(self, instance, target_node=target_node)
6877 except errors.OpExecError:
6878 self.LogWarning("Device creation failed, reverting...")
6880 _RemoveDisks(self, instance, target_node=target_node)
6882 self.cfg.ReleaseDRBDMinors(instance.name)
6885 cluster_name = self.cfg.GetClusterInfo().cluster_name
6888 # activate, get path, copy the data over
6889 for idx, disk in enumerate(instance.disks):
6890 self.LogInfo("Copying data for disk %d", idx)
6891 result = self.rpc.call_blockdev_assemble(target_node, disk,
6892 instance.name, True, idx)
6894 self.LogWarning("Can't assemble newly created disk %d: %s",
6895 idx, result.fail_msg)
6896 errs.append(result.fail_msg)
6898 dev_path = result.payload
6899 result = self.rpc.call_blockdev_export(source_node, disk,
6900 target_node, dev_path,
6903 self.LogWarning("Can't copy data over for disk %d: %s",
6904 idx, result.fail_msg)
6905 errs.append(result.fail_msg)
6909 self.LogWarning("Some disks failed to copy, aborting")
6911 _RemoveDisks(self, instance, target_node=target_node)
6913 self.cfg.ReleaseDRBDMinors(instance.name)
6914 raise errors.OpExecError("Errors during disk copy: %s" %
6917 instance.primary_node = target_node
6918 self.cfg.Update(instance, feedback_fn)
6920 self.LogInfo("Removing the disks on the original node")
6921 _RemoveDisks(self, instance, target_node=source_node)
6923 # Only start the instance if it's marked as up
6924 if instance.admin_up:
6925 self.LogInfo("Starting instance %s on node %s",
6926 instance.name, target_node)
6928 disks_ok, _ = _AssembleInstanceDisks(self, instance,
6929 ignore_secondaries=True)
6931 _ShutdownInstanceDisks(self, instance)
6932 raise errors.OpExecError("Can't activate the instance's disks")
6934 result = self.rpc.call_instance_start(target_node, instance,
6936 msg = result.fail_msg
6938 _ShutdownInstanceDisks(self, instance)
6939 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6940 (instance.name, target_node, msg))
6943 class LUNodeMigrate(LogicalUnit):
6944 """Migrate all instances from a node.
6947 HPATH = "node-migrate"
6948 HTYPE = constants.HTYPE_NODE
6951 def CheckArguments(self):
6954 def ExpandNames(self):
6955 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6957 self.share_locks = _ShareAll()
6958 self.needed_locks = {
6959 locking.LEVEL_NODE: [self.op.node_name],
6962 def BuildHooksEnv(self):
6965 This runs on the master, the primary and all the secondaries.
6969 "NODE_NAME": self.op.node_name,
6972 def BuildHooksNodes(self):
6973 """Build hooks nodes.
6976 nl = [self.cfg.GetMasterNode()]
6979 def CheckPrereq(self):
6982 def Exec(self, feedback_fn):
6983 # Prepare jobs for migration instances
6985 [opcodes.OpInstanceMigrate(instance_name=inst.name,
6988 iallocator=self.op.iallocator,
6989 target_node=self.op.target_node)]
6990 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
6993 # TODO: Run iallocator in this opcode and pass correct placement options to
6994 # OpInstanceMigrate. Since other jobs can modify the cluster between
6995 # running the iallocator and the actual migration, a good consistency model
6996 # will have to be found.
6998 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
6999 frozenset([self.op.node_name]))
7001 return ResultWithJobs(jobs)
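# --- Illustrative sketch (not part of the original module) ----------------
# The "jobs" value built in LUNodeMigrate.Exec above is one single-opcode
# job per primary instance of the evacuated node, roughly:
#
#   jobs = [
#     [opcodes.OpInstanceMigrate(instance_name="inst1.example.com", ...)],
#     [opcodes.OpInstanceMigrate(instance_name="inst2.example.com", ...)],
#   ]
#
# (the instance names are made up; the real list comes from
# _GetNodePrimaryInstances)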
7004 class TLMigrateInstance(Tasklet):
7005 """Tasklet class for instance migration.
7008 @ivar live: whether the migration will be done live or non-live;
7009 this variable is initialized only after CheckPrereq has run
7010 @type cleanup: boolean
7011 @ivar cleanup: Whether we clean up after a failed migration
7012 @type iallocator: string
7013 @ivar iallocator: The iallocator used to determine target_node
7014 @type target_node: string
7015 @ivar target_node: If given, the target_node to reallocate the instance to
7016 @type failover: boolean
7017 @ivar failover: Whether operation results in failover or migration
7018 @type fallback: boolean
7019 @ivar fallback: Whether fallback to failover is allowed if migration is not possible
7021 @type ignore_consistency: boolean
7022 @ivar ignore_consistency: Whether we should ignore consistency between the source and target nodes
7024 @type shutdown_timeout: int
7025 @ivar shutdown_timeout: In case of failover, the timeout used for the shutdown
7030 _MIGRATION_POLL_INTERVAL = 1 # seconds
7031 _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
7033 def __init__(self, lu, instance_name, cleanup=False,
7034 failover=False, fallback=False,
7035 ignore_consistency=False,
7036 shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
7037 """Initializes this class.
7040 Tasklet.__init__(self, lu)
7043 self.instance_name = instance_name
7044 self.cleanup = cleanup
7045 self.live = False # will be overridden later
7046 self.failover = failover
7047 self.fallback = fallback
7048 self.ignore_consistency = ignore_consistency
7049 self.shutdown_timeout = shutdown_timeout
7051 def CheckPrereq(self):
7052 """Check prerequisites.
7054 This checks that the instance is in the cluster.
7057 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
7058 instance = self.cfg.GetInstanceInfo(instance_name)
7059 assert instance is not None
7060 self.instance = instance
7062 if (not self.cleanup and not instance.admin_up and not self.failover and
7064 self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
7066 self.failover = True
7068 if instance.disk_template not in constants.DTS_MIRRORED:
7073 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
7074 " %s" % (instance.disk_template, text),
7077 if instance.disk_template in constants.DTS_EXT_MIRROR:
7078 _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
7080 if self.lu.op.iallocator:
7081 self._RunAllocator()
7083 # We set self.target_node as it is required by
7085 self.target_node = self.lu.op.target_node
7087 # self.target_node is already populated, either directly or by the
7089 target_node = self.target_node
7090 if self.target_node == instance.primary_node:
7091 raise errors.OpPrereqError("Cannot migrate instance %s"
7092 " to its primary (%s)" %
7093 (instance.name, instance.primary_node))
7095 if len(self.lu.tasklets) == 1:
7096 # It is safe to release locks only when we're the only tasklet
7098 _ReleaseLocks(self.lu, locking.LEVEL_NODE,
7099 keep=[instance.primary_node, self.target_node])
7102 secondary_nodes = instance.secondary_nodes
7103 if not secondary_nodes:
7104 raise errors.ConfigurationError("No secondary node but using"
7105 " %s disk template" %
7106 instance.disk_template)
7107 target_node = secondary_nodes[0]
7108 if self.lu.op.iallocator or (self.lu.op.target_node and
7109 self.lu.op.target_node != target_node):
7111 text = "failed over"
7114 raise errors.OpPrereqError("Instances with disk template %s cannot"
7115 " be %s to arbitrary nodes"
7116 " (neither an iallocator nor a target"
7117 " node can be passed)" %
7118 (instance.disk_template, text),
7121 i_be = self.cfg.GetClusterInfo().FillBE(instance)
7123 # check memory requirements on the secondary node
7124 if not self.failover or instance.admin_up:
7125 _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
7126 instance.name, i_be[constants.BE_MEMORY],
7127 instance.hypervisor)
7129 self.lu.LogInfo("Not checking memory on the secondary node as"
7130 " instance will not be started")
7132 # check bridge existence
7133 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
7135 if not self.cleanup:
7136 _CheckNodeNotDrained(self.lu, target_node)
7137 if not self.failover:
7138 result = self.rpc.call_instance_migratable(instance.primary_node,
7140 if result.fail_msg and self.fallback:
7141 self.lu.LogInfo("Can't migrate, instance offline, fallback to"
7143 self.failover = True
7145 result.Raise("Can't migrate, please use failover",
7146 prereq=True, ecode=errors.ECODE_STATE)
7148 assert not (self.failover and self.cleanup)
7150 if not self.failover:
7151 if self.lu.op.live is not None and self.lu.op.mode is not None:
7152 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
7153 " parameters are accepted",
7155 if self.lu.op.live is not None:
7157 self.lu.op.mode = constants.HT_MIGRATION_LIVE
7159 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
7160 # reset the 'live' parameter to None so that repeated
7161 # invocations of CheckPrereq do not raise an exception
7162 self.lu.op.live = None
7163 elif self.lu.op.mode is None:
7164 # read the default value from the hypervisor
7165 i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
7167 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
7169 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
7171 # Failover is never live
7174 def _RunAllocator(self):
7175 """Run the allocator based on input opcode.
7178 ial = IAllocator(self.cfg, self.rpc,
7179 mode=constants.IALLOCATOR_MODE_RELOC,
7180 name=self.instance_name,
7181 # TODO See why hail breaks with a single node below
7182 relocate_from=[self.instance.primary_node,
7183 self.instance.primary_node],
7186 ial.Run(self.lu.op.iallocator)
7189 raise errors.OpPrereqError("Can't compute nodes using"
7190 " iallocator '%s': %s" %
7191 (self.lu.op.iallocator, ial.info),
7193 if len(ial.result) != ial.required_nodes:
7194 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7195 " of nodes (%s), required %s" %
7196 (self.lu.op.iallocator, len(ial.result),
7197 ial.required_nodes), errors.ECODE_FAULT)
7198 self.target_node = ial.result[0]
7199 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7200 self.instance_name, self.lu.op.iallocator,
7201 utils.CommaJoin(ial.result))
7203 def _WaitUntilSync(self):
7204 """Poll with custom rpc for disk sync.
7206 This uses our own step-based rpc call.
7209 self.feedback_fn("* wait until resync is done")
7213 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
7215 self.instance.disks)
7217 for node, nres in result.items():
7218 nres.Raise("Cannot resync disks on node %s" % node)
7219 node_done, node_percent = nres.payload
7220 all_done = all_done and node_done
7221 if node_percent is not None:
7222 min_percent = min(min_percent, node_percent)
7224 if min_percent < 100:
7225 self.feedback_fn(" - progress: %.1f%%" % min_percent)
7228 def _EnsureSecondary(self, node):
7229 """Demote a node to secondary.
7232 self.feedback_fn("* switching node %s to secondary mode" % node)
7234 for dev in self.instance.disks:
7235 self.cfg.SetDiskID(dev, node)
7237 result = self.rpc.call_blockdev_close(node, self.instance.name,
7238 self.instance.disks)
7239 result.Raise("Cannot change disk to secondary on node %s" % node)
7241 def _GoStandalone(self):
7242 """Disconnect from the network.
7245 self.feedback_fn("* changing into standalone mode")
7246 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
7247 self.instance.disks)
7248 for node, nres in result.items():
7249 nres.Raise("Cannot disconnect disks node %s" % node)
7251 def _GoReconnect(self, multimaster):
7252 """Reconnect to the network.
7258 msg = "single-master"
7259 self.feedback_fn("* changing disks into %s mode" % msg)
7260 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
7261 self.instance.disks,
7262 self.instance.name, multimaster)
7263 for node, nres in result.items():
7264 nres.Raise("Cannot change disks config on node %s" % node)
7266 def _ExecCleanup(self):
7267 """Try to cleanup after a failed migration.
7269 The cleanup is done by:
7270 - check that the instance is running only on one node
7271 (and update the config if needed)
7272 - change disks on its secondary node to secondary
7273 - wait until disks are fully synchronized
7274 - disconnect from the network
7275 - change disks into single-master mode
7276 - wait again until disks are fully synchronized
7279 instance = self.instance
7280 target_node = self.target_node
7281 source_node = self.source_node
7283 # check running on only one node
7284 self.feedback_fn("* checking where the instance actually runs"
7285 " (if this hangs, the hypervisor might be in"
7287 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
7288 for node, result in ins_l.items():
7289 result.Raise("Can't contact node %s" % node)
7291 runningon_source = instance.name in ins_l[source_node].payload
7292 runningon_target = instance.name in ins_l[target_node].payload
7294 if runningon_source and runningon_target:
7295 raise errors.OpExecError("Instance seems to be running on two nodes,"
7296 " or the hypervisor is confused; you will have"
7297 " to ensure manually that it runs only on one"
7298 " and restart this operation")
7300 if not (runningon_source or runningon_target):
7301 raise errors.OpExecError("Instance does not seem to be running at all;"
7302 " in this case it's safer to repair by"
7303 " running 'gnt-instance stop' to ensure disk"
7304 " shutdown, and then restarting it")
7306 if runningon_target:
7307 # the migration has actually succeeded, we need to update the config
7308 self.feedback_fn("* instance running on secondary node (%s),"
7309 " updating config" % target_node)
7310 instance.primary_node = target_node
7311 self.cfg.Update(instance, self.feedback_fn)
7312 demoted_node = source_node
7314 self.feedback_fn("* instance confirmed to be running on its"
7315 " primary node (%s)" % source_node)
7316 demoted_node = target_node
7318 if instance.disk_template in constants.DTS_INT_MIRROR:
7319 self._EnsureSecondary(demoted_node)
7321 self._WaitUntilSync()
7322 except errors.OpExecError:
7323 # we ignore errors here, since if the device is standalone, it
7324 # won't be able to sync
7326 self._GoStandalone()
7327 self._GoReconnect(False)
7328 self._WaitUntilSync()
7330 self.feedback_fn("* done")
7332 def _RevertDiskStatus(self):
7333 """Try to revert the disk status after a failed migration.
7336 target_node = self.target_node
7337 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
7341 self._EnsureSecondary(target_node)
7342 self._GoStandalone()
7343 self._GoReconnect(False)
7344 self._WaitUntilSync()
7345 except errors.OpExecError, err:
7346 self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
7347 " please try to recover the instance manually;"
7348 " error '%s'" % str(err))
7350 def _AbortMigration(self):
7351 """Call the hypervisor code to abort a started migration.
7354 instance = self.instance
7355 target_node = self.target_node
7356 source_node = self.source_node
7357 migration_info = self.migration_info
7359 abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
7363 abort_msg = abort_result.fail_msg
7365 logging.error("Aborting migration failed on target node %s: %s",
7366 target_node, abort_msg)
7367 # Don't raise an exception here, as we still have to try to revert the
7368 # disk status, even if this step failed.
7370 abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
7371 instance, False, self.live)
7372 abort_msg = abort_result.fail_msg
7374 logging.error("Aborting migration failed on source node %s: %s",
7375 source_node, abort_msg)
7377 def _ExecMigration(self):
7378 """Migrate an instance.
7380 The migrate is done by:
7381 - change the disks into dual-master mode
7382 - wait until disks are fully synchronized again
7383 - migrate the instance
7384 - change disks on the new secondary node (the old primary) to secondary
7385 - wait until disks are fully synchronized
7386 - change disks into single-master mode
7389 instance = self.instance
7390 target_node = self.target_node
7391 source_node = self.source_node
7393 self.feedback_fn("* checking disk consistency between source and target")
7394 for dev in instance.disks:
7395 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7396 raise errors.OpExecError("Disk %s is degraded or not fully"
7397 " synchronized on target node,"
7398 " aborting migration" % dev.iv_name)
7400 # First get the migration information from the remote node
7401 result = self.rpc.call_migration_info(source_node, instance)
7402 msg = result.fail_msg
7404 log_err = ("Failed fetching source migration information from %s: %s" %
7406 logging.error(log_err)
7407 raise errors.OpExecError(log_err)
7409 self.migration_info = migration_info = result.payload
7411 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7412 # Then switch the disks to master/master mode
7413 self._EnsureSecondary(target_node)
7414 self._GoStandalone()
7415 self._GoReconnect(True)
7416 self._WaitUntilSync()
7418 self.feedback_fn("* preparing %s to accept the instance" % target_node)
7419 result = self.rpc.call_accept_instance(target_node,
7422 self.nodes_ip[target_node])
7424 msg = result.fail_msg
7426 logging.error("Instance pre-migration failed, trying to revert"
7427 " disk status: %s", msg)
7428 self.feedback_fn("Pre-migration failed, aborting")
7429 self._AbortMigration()
7430 self._RevertDiskStatus()
7431 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
7432 (instance.name, msg))
7434 self.feedback_fn("* migrating instance to %s" % target_node)
7435 result = self.rpc.call_instance_migrate(source_node, instance,
7436 self.nodes_ip[target_node],
7438 msg = result.fail_msg
7440 logging.error("Instance migration failed, trying to revert"
7441 " disk status: %s", msg)
7442 self.feedback_fn("Migration failed, aborting")
7443 self._AbortMigration()
7444 self._RevertDiskStatus()
7445 raise errors.OpExecError("Could not migrate instance %s: %s" %
7446 (instance.name, msg))
7448 self.feedback_fn("* starting memory transfer")
7449 last_feedback = time.time()
7451 result = self.rpc.call_instance_get_migration_status(source_node,
7453 msg = result.fail_msg
7454 ms = result.payload # MigrationStatus instance
7455 if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
7456 logging.error("Instance migration failed, trying to revert"
7457 " disk status: %s", msg)
7458 self.feedback_fn("Migration failed, aborting")
7459 self._AbortMigration()
7460 self._RevertDiskStatus()
7461 raise errors.OpExecError("Could not migrate instance %s: %s" %
7462 (instance.name, msg))
7464 if result.payload.status != constants.HV_MIGRATION_ACTIVE:
7465 self.feedback_fn("* memory transfer complete")
7468 if (utils.TimeoutExpired(last_feedback,
7469 self._MIGRATION_FEEDBACK_INTERVAL) and
7470 ms.transferred_ram is not None):
7471 mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
7472 self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
7473 last_feedback = time.time()
7475 time.sleep(self._MIGRATION_POLL_INTERVAL)
7477 result = self.rpc.call_instance_finalize_migration_src(source_node,
7481 msg = result.fail_msg
7483 logging.error("Instance migration succeeded, but finalization failed"
7484 " on the source node: %s", msg)
7485 raise errors.OpExecError("Could not finalize instance migration: %s" %
7488 instance.primary_node = target_node
7490 # distribute new instance config to the other nodes
7491 self.cfg.Update(instance, self.feedback_fn)
7493 result = self.rpc.call_instance_finalize_migration_dst(target_node,
7497 msg = result.fail_msg
7499 logging.error("Instance migration succeeded, but finalization failed"
7500 " on the target node: %s", msg)
7501 raise errors.OpExecError("Could not finalize instance migration: %s" %
7504 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7505 self._EnsureSecondary(source_node)
7506 self._WaitUntilSync()
7507 self._GoStandalone()
7508 self._GoReconnect(False)
7509 self._WaitUntilSync()
7511 self.feedback_fn("* done")
7513 def _ExecFailover(self):
7514 """Failover an instance.
7516 The failover is done by shutting it down on its present node and
7517 starting it on the secondary.
7520 instance = self.instance
7521 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
7523 source_node = instance.primary_node
7524 target_node = self.target_node
7526 if instance.admin_up:
7527 self.feedback_fn("* checking disk consistency between source and target")
7528 for dev in instance.disks:
7529 # for drbd, these are drbd over lvm
7530 if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7531 if primary_node.offline:
7532 self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
7534 (primary_node.name, dev.iv_name, target_node))
7535 elif not self.ignore_consistency:
7536 raise errors.OpExecError("Disk %s is degraded on target node,"
7537 " aborting failover" % dev.iv_name)
7539 self.feedback_fn("* not checking disk consistency as instance is not"
7542 self.feedback_fn("* shutting down instance on source node")
7543 logging.info("Shutting down instance %s on node %s",
7544 instance.name, source_node)
7546 result = self.rpc.call_instance_shutdown(source_node, instance,
7547 self.shutdown_timeout)
7548 msg = result.fail_msg
if msg:
  if self.ignore_consistency or primary_node.offline:
    self.lu.LogWarning("Could not shutdown instance %s on node %s,"
                       " proceeding anyway; please make sure node"
                       " %s is down; error details: %s",
                       instance.name, source_node, source_node, msg)
  else:
    raise errors.OpExecError("Could not shutdown instance %s on"
                             " node %s: %s" %
                             (instance.name, source_node, msg))
7560 self.feedback_fn("* deactivating the instance's disks on source node")
7561 if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
7562 raise errors.OpExecError("Can't shut down the instance's disks")
7564 instance.primary_node = target_node
7565 # distribute new instance config to the other nodes
7566 self.cfg.Update(instance, self.feedback_fn)
7568 # Only start the instance if it's marked as up
7569 if instance.admin_up:
7570 self.feedback_fn("* activating the instance's disks on target node %s" %
7572 logging.info("Starting instance %s on node %s",
7573 instance.name, target_node)
disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
                                     ignore_secondaries=True)
if not disks_ok:
  _ShutdownInstanceDisks(self.lu, instance)
  raise errors.OpExecError("Can't activate the instance's disks")
7581 self.feedback_fn("* starting the instance on the target node %s" %
7583 result = self.rpc.call_instance_start(target_node, instance, None, None,
msg = result.fail_msg
if msg:
  _ShutdownInstanceDisks(self.lu, instance)
7588 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7589 (instance.name, target_node, msg))
7591 def Exec(self, feedback_fn):
7592 """Perform the migration.
7595 self.feedback_fn = feedback_fn
7596 self.source_node = self.instance.primary_node
7598 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
7599 if self.instance.disk_template in constants.DTS_INT_MIRROR:
7600 self.target_node = self.instance.secondary_nodes[0]
7601 # Otherwise self.target_node has been populated either
7602 # directly, or through an iallocator.
7604 self.all_nodes = [self.source_node, self.target_node]
7605 self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
7606 in self.cfg.GetMultiNodeInfo(self.all_nodes))
7609 feedback_fn("Failover instance %s" % self.instance.name)
7610 self._ExecFailover()
7612 feedback_fn("Migrating instance %s" % self.instance.name)
7615 return self._ExecCleanup()
7617 return self._ExecMigration()
7620 def _CreateBlockDev(lu, node, instance, device, force_create,
7622 """Create a tree of block devices on a given node.
If this device type has to be created on secondaries, create it and
all its children.
7627 If not, just recurse to children keeping the same 'force' value.
7629 @param lu: the lu on whose behalf we execute
7630 @param node: the node on which to create the device
7631 @type instance: L{objects.Instance}
7632 @param instance: the instance which owns the device
7633 @type device: L{objects.Disk}
7634 @param device: the device to create
7635 @type force_create: boolean
@param force_create: whether to force creation of this device; this
will be changed to True whenever we find a device whose
CreateOnSecondary() method returns True
@param info: the extra 'metadata' we should attach to the device
(this will be represented as an LVM tag)
@type force_open: boolean
@param force_open: this parameter will be passed to the
L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device's own Open() execution
7648 if device.CreateOnSecondary():
7652 for child in device.children:
7653 _CreateBlockDev(lu, node, instance, child, force_create,
7656 if not force_create:
7659 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
7662 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
7663 """Create a single block device on a given node.
This will not recurse over children of the device, so they must be
created in advance.
7668 @param lu: the lu on whose behalf we execute
7669 @param node: the node on which to create the device
7670 @type instance: L{objects.Instance}
7671 @param instance: the instance which owns the device
7672 @type device: L{objects.Disk}
7673 @param device: the device to create
7674 @param info: the extra 'metadata' we should attach to the device
(this will be represented as an LVM tag)
@type force_open: boolean
@param force_open: this parameter will be passed to the
L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device's own Open() execution
7683 lu.cfg.SetDiskID(device, node)
7684 result = lu.rpc.call_blockdev_create(node, device, device.size,
7685 instance.name, force_open, info)
7686 result.Raise("Can't create block device %s on"
7687 " node %s for instance %s" % (device, node, instance.name))
7688 if device.physical_id is None:
7689 device.physical_id = result.payload
7692 def _GenerateUniqueNames(lu, exts):
7693 """Generate a suitable LV name.
7695 This will generate a logical volume name for the given instance.
results = []
for val in exts:
  new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  results.append("%s%s" % (new_id, val))
return results
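# Illustrative example (hypothetical IDs): _GenerateUniqueNames(lu,
# [".disk0", ".disk1"]) returns something like
# ["d2ed1cd1-....disk0", "9f3c5e12-....disk1"], one unique LV name per suffix.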
7705 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
7706 iv_name, p_minor, s_minor):
7707 """Generate a drbd8 device complete with its children.
7710 assert len(vgnames) == len(names) == 2
7711 port = lu.cfg.AllocatePort()
7712 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
7713 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
7714 logical_id=(vgnames[0], names[0]))
7715 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7716 logical_id=(vgnames[1], names[1]))
7717 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
7718 logical_id=(primary, secondary, port,
7721 children=[dev_data, dev_meta],
7726 def _GenerateDiskTemplate(lu, template_name,
7727 instance_name, primary_node,
7728 secondary_nodes, disk_info,
7729 file_storage_dir, file_driver,
7730 base_index, feedback_fn):
7731 """Generate the entire disk layout for a given template type.
7734 #TODO: compute space requirements
7736 vgname = lu.cfg.GetVGName()
disk_count = len(disk_info)
disks = []

if template_name == constants.DT_DISKLESS:
  pass
elif template_name == constants.DT_PLAIN:
7742 if len(secondary_nodes) != 0:
7743 raise errors.ProgrammerError("Wrong template configuration")
7745 names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7746 for i in range(disk_count)])
7747 for idx, disk in enumerate(disk_info):
7748 disk_index = idx + base_index
7749 vg = disk.get(constants.IDISK_VG, vgname)
7750 feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
7751 disk_dev = objects.Disk(dev_type=constants.LD_LV,
7752 size=disk[constants.IDISK_SIZE],
7753 logical_id=(vg, names[idx]),
7754 iv_name="disk/%d" % disk_index,
7755 mode=disk[constants.IDISK_MODE])
7756 disks.append(disk_dev)
7757 elif template_name == constants.DT_DRBD8:
7758 if len(secondary_nodes) != 1:
7759 raise errors.ProgrammerError("Wrong template configuration")
7760 remote_node = secondary_nodes[0]
7761 minors = lu.cfg.AllocateDRBDMinor(
7762 [primary_node, remote_node] * len(disk_info), instance_name)
7765 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7766 for i in range(disk_count)]):
7767 names.append(lv_prefix + "_data")
7768 names.append(lv_prefix + "_meta")
7769 for idx, disk in enumerate(disk_info):
7770 disk_index = idx + base_index
7771 data_vg = disk.get(constants.IDISK_VG, vgname)
7772 meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
7773 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
7774 disk[constants.IDISK_SIZE],
7776 names[idx * 2:idx * 2 + 2],
7777 "disk/%d" % disk_index,
7778 minors[idx * 2], minors[idx * 2 + 1])
7779 disk_dev.mode = disk[constants.IDISK_MODE]
7780 disks.append(disk_dev)
7781 elif template_name == constants.DT_FILE:
7782 if len(secondary_nodes) != 0:
7783 raise errors.ProgrammerError("Wrong template configuration")
7785 opcodes.RequireFileStorage()
7787 for idx, disk in enumerate(disk_info):
7788 disk_index = idx + base_index
7789 disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7790 size=disk[constants.IDISK_SIZE],
7791 iv_name="disk/%d" % disk_index,
7792 logical_id=(file_driver,
7793 "%s/disk%d" % (file_storage_dir,
7795 mode=disk[constants.IDISK_MODE])
7796 disks.append(disk_dev)
7797 elif template_name == constants.DT_SHARED_FILE:
7798 if len(secondary_nodes) != 0:
7799 raise errors.ProgrammerError("Wrong template configuration")
7801 opcodes.RequireSharedFileStorage()
7803 for idx, disk in enumerate(disk_info):
7804 disk_index = idx + base_index
7805 disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7806 size=disk[constants.IDISK_SIZE],
7807 iv_name="disk/%d" % disk_index,
7808 logical_id=(file_driver,
7809 "%s/disk%d" % (file_storage_dir,
7811 mode=disk[constants.IDISK_MODE])
7812 disks.append(disk_dev)
7813 elif template_name == constants.DT_BLOCK:
7814 if len(secondary_nodes) != 0:
7815 raise errors.ProgrammerError("Wrong template configuration")
7817 for idx, disk in enumerate(disk_info):
7818 disk_index = idx + base_index
7819 disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
7820 size=disk[constants.IDISK_SIZE],
7821 logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
7822 disk[constants.IDISK_ADOPT]),
7823 iv_name="disk/%d" % disk_index,
7824 mode=disk[constants.IDISK_MODE])
7825 disks.append(disk_dev)
else:
  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)

return disks
7832 def _GetInstanceInfoText(instance):
7833 """Compute that text that should be added to the disk's metadata.
7836 return "originstname+%s" % instance.name
7839 def _CalcEta(time_taken, written, total_size):
7840 """Calculates the ETA based on size written and total size.
7842 @param time_taken: The time taken so far
7843 @param written: amount written so far
7844 @param total_size: The total size of data to be written
7845 @return: The remaining time in seconds
7848 avg_time = time_taken / float(written)
7849 return (total_size - written) * avg_time
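# Worked example (illustrative): after 30s with 512 MiB of 2048 MiB written,
# avg_time is 30/512 s per MiB and the ETA is (2048 - 512) * 30/512 = 90s.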
7852 def _WipeDisks(lu, instance):
7853 """Wipes instance disks.
7855 @type lu: L{LogicalUnit}
7856 @param lu: the logical unit on whose behalf we execute
7857 @type instance: L{objects.Instance}
@param instance: the instance whose disks we should wipe
7859 @return: the success of the wipe
7862 node = instance.primary_node
7864 for device in instance.disks:
7865 lu.cfg.SetDiskID(device, node)
7867 logging.info("Pause sync of instance %s disks", instance.name)
7868 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
7870 for idx, success in enumerate(result.payload):
7872 logging.warn("pause-sync of instance %s for disks %d failed",
7876 for idx, device in enumerate(instance.disks):
# The wipe size is MIN_WIPE_CHUNK_PERCENT percent of the instance disk,
# but at most MAX_WIPE_CHUNK
7879 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
7880 constants.MIN_WIPE_CHUNK_PERCENT)
# we _must_ make this an int, otherwise rounding errors will occur
7883 wipe_chunk_size = int(wipe_chunk_size)
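# Illustrative numbers, assuming MIN_WIPE_CHUNK_PERCENT == 10 and
# MAX_WIPE_CHUNK == 1024 MiB: a 200 GiB (204800 MiB) disk gives a candidate
# chunk of 20480 MiB, which is capped at 1024 MiB per wipe request.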
7885 lu.LogInfo("* Wiping disk %d", idx)
7886 logging.info("Wiping disk %d for instance %s, node %s using"
7887 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
7892 start_time = time.time()
7894 while offset < size:
7895 wipe_size = min(wipe_chunk_size, size - offset)
7896 logging.debug("Wiping disk %d, offset %s, chunk %s",
7897 idx, offset, wipe_size)
7898 result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
7899 result.Raise("Could not wipe disk %d at offset %d for size %d" %
7900 (idx, offset, wipe_size))
7903 if now - last_output >= 60:
7904 eta = _CalcEta(now - start_time, offset, size)
7905 lu.LogInfo(" - done: %.1f%% ETA: %s" %
7906 (offset / float(size) * 100, utils.FormatSeconds(eta)))
7909 logging.info("Resume sync of instance %s disks", instance.name)
7911 result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
7913 for idx, success in enumerate(result.payload):
7915 lu.LogWarning("Resume sync of disk %d failed, please have a"
7916 " look at the status and troubleshoot the issue", idx)
7917 logging.warn("resume-sync of instance %s for disks %d failed",
7921 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
7922 """Create all disks for an instance.
7924 This abstracts away some work from AddInstance.
7926 @type lu: L{LogicalUnit}
7927 @param lu: the logical unit on whose behalf we execute
7928 @type instance: L{objects.Instance}
7929 @param instance: the instance whose disks we should create
7931 @param to_skip: list of indices to skip
7932 @type target_node: string
7933 @param target_node: if passed, overrides the target node for creation
7935 @return: the success of the creation
7938 info = _GetInstanceInfoText(instance)
7939 if target_node is None:
7940 pnode = instance.primary_node
7941 all_nodes = instance.all_nodes
7946 if instance.disk_template in constants.DTS_FILEBASED:
7947 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7948 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
7950 result.Raise("Failed to create directory '%s' on"
7951 " node %s" % (file_storage_dir, pnode))
7953 # Note: this needs to be kept in sync with adding of disks in
7954 # LUInstanceSetParams
7955 for idx, device in enumerate(instance.disks):
7956 if to_skip and idx in to_skip:
7958 logging.info("Creating volume %s for instance %s",
7959 device.iv_name, instance.name)
7961 for node in all_nodes:
7962 f_create = node == pnode
7963 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
7966 def _RemoveDisks(lu, instance, target_node=None):
7967 """Remove all disks for an instance.
7969 This abstracts away some work from `AddInstance()` and
7970 `RemoveInstance()`. Note that in case some of the devices couldn't
7971 be removed, the removal will continue with the other ones (compare
7972 with `_CreateDisks()`).
7974 @type lu: L{LogicalUnit}
7975 @param lu: the logical unit on whose behalf we execute
7976 @type instance: L{objects.Instance}
7977 @param instance: the instance whose disks we should remove
7978 @type target_node: string
7979 @param target_node: used to override the node on which to remove the disks
7981 @return: the success of the removal
7984 logging.info("Removing block devices for instance %s", instance.name)
7987 for device in instance.disks:
7989 edata = [(target_node, device)]
7991 edata = device.ComputeNodeTree(instance.primary_node)
7992 for node, disk in edata:
7993 lu.cfg.SetDiskID(disk, node)
7994 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
7996 lu.LogWarning("Could not remove block device %s on node %s,"
7997 " continuing anyway: %s", device.iv_name, node, msg)
8000 if instance.disk_template == constants.DT_FILE:
8001 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
8005 tgt = instance.primary_node
8006 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
8008 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
8009 file_storage_dir, instance.primary_node, result.fail_msg)
8015 def _ComputeDiskSizePerVG(disk_template, disks):
8016 """Compute disk size requirements in the volume group
8019 def _compute(disks, payload):
8020 """Universal algorithm.
vgs = {}
for disk in disks:
  vgs[disk[constants.IDISK_VG]] = \
    vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + payload
return vgs
8030 # Required free disk space as a function of disk and swap space
8032 constants.DT_DISKLESS: {},
8033 constants.DT_PLAIN: _compute(disks, 0),
8034 # 128 MB are added for drbd metadata for each disk
8035 constants.DT_DRBD8: _compute(disks, 128),
8036 constants.DT_FILE: {},
8037 constants.DT_SHARED_FILE: {},
8040 if disk_template not in req_size_dict:
8041 raise errors.ProgrammerError("Disk template '%s' size requirement"
8042 " is unknown" % disk_template)
8044 return req_size_dict[disk_template]
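# Illustrative example: for two DRBD8 disks of 1024 MiB each in VG "xenvg"
# (hypothetical name), the result is {"xenvg": 2 * (1024 + 128)} == 2304,
# i.e. data size plus 128 MiB of DRBD metadata per disk, keyed by VG.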
8047 def _ComputeDiskSize(disk_template, disks):
8048 """Compute disk size requirements in the volume group
8051 # Required free disk space as a function of disk and swap space
8053 constants.DT_DISKLESS: None,
8054 constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
8055 # 128 MB are added for drbd metadata for each disk
8056 constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
8057 constants.DT_FILE: None,
8058 constants.DT_SHARED_FILE: 0,
8059 constants.DT_BLOCK: 0,
8062 if disk_template not in req_size_dict:
8063 raise errors.ProgrammerError("Disk template '%s' size requirement"
8064 " is unknown" % disk_template)
8066 return req_size_dict[disk_template]
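# Illustrative example: _ComputeDiskSize(constants.DT_DRBD8,
# [{constants.IDISK_SIZE: 1024}, {constants.IDISK_SIZE: 2048}]) returns
# 1024 + 128 + 2048 + 128 == 3328 MiB.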
8069 def _FilterVmNodes(lu, nodenames):
8070 """Filters out non-vm_capable nodes from a list.
8072 @type lu: L{LogicalUnit}
8073 @param lu: the logical unit for which we check
8074 @type nodenames: list
8075 @param nodenames: the list of nodes on which we should check
8077 @return: the list of vm-capable nodes
8080 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
8081 return [name for name in nodenames if name not in vm_nodes]
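# Note: despite its name, vm_nodes above holds the non-vm_capable nodes; e.g.
# with nodenames == ["node1", "node2"] (hypothetical) and only node2 flagged
# non-vm_capable, the helper returns ["node1"].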
8084 def _CheckHVParams(lu, nodenames, hvname, hvparams):
8085 """Hypervisor parameter validation.
This function abstracts the hypervisor parameter validation to be
8088 used in both instance create and instance modify.
8090 @type lu: L{LogicalUnit}
8091 @param lu: the logical unit for which we check
8092 @type nodenames: list
8093 @param nodenames: the list of nodes on which we should check
8094 @type hvname: string
8095 @param hvname: the name of the hypervisor we should use
8096 @type hvparams: dict
8097 @param hvparams: the parameters which we need to check
8098 @raise errors.OpPrereqError: if the parameters are not valid
8101 nodenames = _FilterVmNodes(lu, nodenames)
hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvparams)
for node in nodenames:
  info = hvinfo[node]
  if info.offline:
    continue
  info.Raise("Hypervisor parameter validation failed on node %s" % node)
8112 def _CheckOSParams(lu, required, nodenames, osname, osparams):
8113 """OS parameters validation.
8115 @type lu: L{LogicalUnit}
8116 @param lu: the logical unit for which we check
8117 @type required: boolean
@param required: whether the validation should fail if the OS is not
found
8120 @type nodenames: list
8121 @param nodenames: the list of nodes on which we should check
8122 @type osname: string
@param osname: the name of the OS we should use
8124 @type osparams: dict
8125 @param osparams: the parameters which we need to check
8126 @raise errors.OpPrereqError: if the parameters are not valid
8129 nodenames = _FilterVmNodes(lu, nodenames)
8130 result = lu.rpc.call_os_validate(required, nodenames, osname,
[constants.OS_VALIDATE_PARAMETERS],
osparams)
8133 for node, nres in result.items():
8134 # we don't check for offline cases since this should be run only
8135 # against the master node and/or an instance's nodes
8136 nres.Raise("OS Parameters validation failed on node %s" % node)
8137 if not nres.payload:
8138 lu.LogInfo("OS %s not found on node %s, validation skipped",
8142 class LUInstanceCreate(LogicalUnit):
8143 """Create an instance.
8146 HPATH = "instance-add"
8147 HTYPE = constants.HTYPE_INSTANCE
8150 def CheckArguments(self):
8154 # do not require name_check to ease forward/backward compatibility
8156 if self.op.no_install and self.op.start:
8157 self.LogInfo("No-installation mode selected, disabling startup")
8158 self.op.start = False
8159 # validate/normalize the instance name
8160 self.op.instance_name = \
8161 netutils.Hostname.GetNormalizedName(self.op.instance_name)
8163 if self.op.ip_check and not self.op.name_check:
8164 # TODO: make the ip check more flexible and not depend on the name check
8165 raise errors.OpPrereqError("Cannot do IP address check without a name"
8166 " check", errors.ECODE_INVAL)
8168 # check nics' parameter names
8169 for nic in self.op.nics:
8170 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
8172 # check disks. parameter names and consistent adopt/no-adopt strategy
8173 has_adopt = has_no_adopt = False
8174 for disk in self.op.disks:
8175 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
if constants.IDISK_ADOPT in disk:
  has_adopt = True
else:
  has_no_adopt = True
8180 if has_adopt and has_no_adopt:
8181 raise errors.OpPrereqError("Either all disks are adopted or none is",
8184 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
8185 raise errors.OpPrereqError("Disk adoption is not supported for the"
8186 " '%s' disk template" %
8187 self.op.disk_template,
8189 if self.op.iallocator is not None:
8190 raise errors.OpPrereqError("Disk adoption not allowed with an"
8191 " iallocator script", errors.ECODE_INVAL)
8192 if self.op.mode == constants.INSTANCE_IMPORT:
8193 raise errors.OpPrereqError("Disk adoption not allowed for"
8194 " instance import", errors.ECODE_INVAL)
8196 if self.op.disk_template in constants.DTS_MUST_ADOPT:
8197 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
8198 " but no 'adopt' parameter given" %
8199 self.op.disk_template,
8202 self.adopt_disks = has_adopt
8204 # instance name verification
8205 if self.op.name_check:
8206 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
8207 self.op.instance_name = self.hostname1.name
8208 # used in CheckPrereq for ip ping check
8209 self.check_ip = self.hostname1.ip
8211 self.check_ip = None
8213 # file storage checks
8214 if (self.op.file_driver and
8215 not self.op.file_driver in constants.FILE_DRIVER):
8216 raise errors.OpPrereqError("Invalid file driver name '%s'" %
8217 self.op.file_driver, errors.ECODE_INVAL)
8219 if self.op.disk_template == constants.DT_FILE:
8220 opcodes.RequireFileStorage()
8221 elif self.op.disk_template == constants.DT_SHARED_FILE:
8222 opcodes.RequireSharedFileStorage()
8224 ### Node/iallocator related checks
8225 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
8227 if self.op.pnode is not None:
8228 if self.op.disk_template in constants.DTS_INT_MIRROR:
8229 if self.op.snode is None:
8230 raise errors.OpPrereqError("The networked disk templates need"
8231 " a mirror node", errors.ECODE_INVAL)
8233 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
8235 self.op.snode = None
8237 self._cds = _GetClusterDomainSecret()
8239 if self.op.mode == constants.INSTANCE_IMPORT:
8240 # On import force_variant must be True, because if we forced it at
8241 # initial install, our only chance when importing it back is that it
8243 self.op.force_variant = True
8245 if self.op.no_install:
8246 self.LogInfo("No-installation mode has no effect during import")
8248 elif self.op.mode == constants.INSTANCE_CREATE:
8249 if self.op.os_type is None:
8250 raise errors.OpPrereqError("No guest OS specified",
8252 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
8253 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
8254 " installation" % self.op.os_type,
8256 if self.op.disk_template is None:
8257 raise errors.OpPrereqError("No disk template specified",
8260 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
8261 # Check handshake to ensure both clusters have the same domain secret
8262 src_handshake = self.op.source_handshake
8263 if not src_handshake:
8264 raise errors.OpPrereqError("Missing source handshake",
8267 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
8270 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
8273 # Load and check source CA
8274 self.source_x509_ca_pem = self.op.source_x509_ca
8275 if not self.source_x509_ca_pem:
8276 raise errors.OpPrereqError("Missing source X509 CA",
8280 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
8282 except OpenSSL.crypto.Error, err:
8283 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
8284 (err, ), errors.ECODE_INVAL)
8286 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
8287 if errcode is not None:
8288 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
8291 self.source_x509_ca = cert
8293 src_instance_name = self.op.source_instance_name
8294 if not src_instance_name:
8295 raise errors.OpPrereqError("Missing source instance name",
8298 self.source_instance_name = \
8299 netutils.GetHostname(name=src_instance_name).name
8302 raise errors.OpPrereqError("Invalid instance creation mode %r" %
8303 self.op.mode, errors.ECODE_INVAL)
8305 def ExpandNames(self):
8306 """ExpandNames for CreateInstance.
8308 Figure out the right locks for instance creation.
8311 self.needed_locks = {}
8313 instance_name = self.op.instance_name
8314 # this is just a preventive check, but someone might still add this
8315 # instance in the meantime, and creation will fail at lock-add time
8316 if instance_name in self.cfg.GetInstanceList():
8317 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
8318 instance_name, errors.ECODE_EXISTS)
8320 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
8322 if self.op.iallocator:
8323 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8325 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
8326 nodelist = [self.op.pnode]
8327 if self.op.snode is not None:
8328 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
8329 nodelist.append(self.op.snode)
8330 self.needed_locks[locking.LEVEL_NODE] = nodelist
8332 # in case of import lock the source node too
8333 if self.op.mode == constants.INSTANCE_IMPORT:
8334 src_node = self.op.src_node
8335 src_path = self.op.src_path
8337 if src_path is None:
8338 self.op.src_path = src_path = self.op.instance_name
8340 if src_node is None:
8341 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8342 self.op.src_node = None
8343 if os.path.isabs(src_path):
8344 raise errors.OpPrereqError("Importing an instance from a path"
8345 " requires a source node option",
8348 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
8349 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
8350 self.needed_locks[locking.LEVEL_NODE].append(src_node)
8351 if not os.path.isabs(src_path):
8352 self.op.src_path = src_path = \
8353 utils.PathJoin(constants.EXPORT_DIR, src_path)
8355 def _RunAllocator(self):
8356 """Run the allocator based on input opcode.
8359 nics = [n.ToDict() for n in self.nics]
8360 ial = IAllocator(self.cfg, self.rpc,
8361 mode=constants.IALLOCATOR_MODE_ALLOC,
8362 name=self.op.instance_name,
8363 disk_template=self.op.disk_template,
8366 vcpus=self.be_full[constants.BE_VCPUS],
8367 memory=self.be_full[constants.BE_MEMORY],
8370 hypervisor=self.op.hypervisor,
8373 ial.Run(self.op.iallocator)
8376 raise errors.OpPrereqError("Can't compute nodes using"
8377 " iallocator '%s': %s" %
8378 (self.op.iallocator, ial.info),
8380 if len(ial.result) != ial.required_nodes:
8381 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8382 " of nodes (%s), required %s" %
8383 (self.op.iallocator, len(ial.result),
8384 ial.required_nodes), errors.ECODE_FAULT)
8385 self.op.pnode = ial.result[0]
8386 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8387 self.op.instance_name, self.op.iallocator,
8388 utils.CommaJoin(ial.result))
8389 if ial.required_nodes == 2:
8390 self.op.snode = ial.result[1]
8392 def BuildHooksEnv(self):
8395 This runs on master, primary and secondary nodes of the instance.
8399 "ADD_MODE": self.op.mode,
8401 if self.op.mode == constants.INSTANCE_IMPORT:
8402 env["SRC_NODE"] = self.op.src_node
8403 env["SRC_PATH"] = self.op.src_path
8404 env["SRC_IMAGES"] = self.src_images
8406 env.update(_BuildInstanceHookEnv(
8407 name=self.op.instance_name,
8408 primary_node=self.op.pnode,
8409 secondary_nodes=self.secondaries,
8410 status=self.op.start,
8411 os_type=self.op.os_type,
8412 memory=self.be_full[constants.BE_MEMORY],
8413 vcpus=self.be_full[constants.BE_VCPUS],
8414 nics=_NICListToTuple(self, self.nics),
8415 disk_template=self.op.disk_template,
8416 disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
8417 for d in self.disks],
8420 hypervisor_name=self.op.hypervisor,
8426 def BuildHooksNodes(self):
8427 """Build hooks nodes.
8430 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
8433 def _ReadExportInfo(self):
8434 """Reads the export information from disk.
8436 It will override the opcode source node and path with the actual
8437 information, if these two were not specified before.
8439 @return: the export information
8442 assert self.op.mode == constants.INSTANCE_IMPORT
8444 src_node = self.op.src_node
8445 src_path = self.op.src_path
8447 if src_node is None:
8448 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
8449 exp_list = self.rpc.call_export_list(locked_nodes)
8451 for node in exp_list:
8452 if exp_list[node].fail_msg:
8454 if src_path in exp_list[node].payload:
8456 self.op.src_node = src_node = node
8457 self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
8461 raise errors.OpPrereqError("No export found for relative path %s" %
8462 src_path, errors.ECODE_INVAL)
8464 _CheckNodeOnline(self, src_node)
8465 result = self.rpc.call_export_info(src_node, src_path)
8466 result.Raise("No export or invalid export found in dir %s" % src_path)
8468 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
8469 if not export_info.has_section(constants.INISECT_EXP):
8470 raise errors.ProgrammerError("Corrupted export config",
8471 errors.ECODE_ENVIRON)
8473 ei_version = export_info.get(constants.INISECT_EXP, "version")
8474 if (int(ei_version) != constants.EXPORT_VERSION):
8475 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
8476 (ei_version, constants.EXPORT_VERSION),
8477 errors.ECODE_ENVIRON)
8480 def _ReadExportParams(self, einfo):
8481 """Use export parameters as defaults.
8483 In case the opcode doesn't specify (as in override) some instance
8484 parameters, then try to use them from the export information, if
8488 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
8490 if self.op.disk_template is None:
8491 if einfo.has_option(constants.INISECT_INS, "disk_template"):
8492 self.op.disk_template = einfo.get(constants.INISECT_INS,
8494 if self.op.disk_template not in constants.DISK_TEMPLATES:
8495 raise errors.OpPrereqError("Disk template specified in configuration"
8496 " file is not one of the allowed values:"
8497 " %s" % " ".join(constants.DISK_TEMPLATES))
8499 raise errors.OpPrereqError("No disk template specified and the export"
8500 " is missing the disk_template information",
8503 if not self.op.disks:
8505 # TODO: import the disk iv_name too
8506 for idx in range(constants.MAX_DISKS):
8507 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
8508 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
8509 disks.append({constants.IDISK_SIZE: disk_sz})
8510 self.op.disks = disks
8511 if not disks and self.op.disk_template != constants.DT_DISKLESS:
8512 raise errors.OpPrereqError("No disk info specified and the export"
8513 " is missing the disk information",
8516 if not self.op.nics:
8518 for idx in range(constants.MAX_NICS):
8519 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
8521 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
8522 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
8529 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
8530 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
8532 if (self.op.hypervisor is None and
8533 einfo.has_option(constants.INISECT_INS, "hypervisor")):
8534 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
8536 if einfo.has_section(constants.INISECT_HYP):
8537 # use the export parameters but do not override the ones
8538 # specified by the user
8539 for name, value in einfo.items(constants.INISECT_HYP):
8540 if name not in self.op.hvparams:
8541 self.op.hvparams[name] = value
8543 if einfo.has_section(constants.INISECT_BEP):
8544 # use the parameters, without overriding
8545 for name, value in einfo.items(constants.INISECT_BEP):
8546 if name not in self.op.beparams:
8547 self.op.beparams[name] = value
8549 # try to read the parameters old style, from the main section
8550 for name in constants.BES_PARAMETERS:
8551 if (name not in self.op.beparams and
8552 einfo.has_option(constants.INISECT_INS, name)):
8553 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
8555 if einfo.has_section(constants.INISECT_OSP):
8556 # use the parameters, without overriding
8557 for name, value in einfo.items(constants.INISECT_OSP):
8558 if name not in self.op.osparams:
8559 self.op.osparams[name] = value
8561 def _RevertToDefaults(self, cluster):
8562 """Revert the instance parameters to the default values.
8566 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
8567 for name in self.op.hvparams.keys():
8568 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
8569 del self.op.hvparams[name]
8571 be_defs = cluster.SimpleFillBE({})
8572 for name in self.op.beparams.keys():
8573 if name in be_defs and be_defs[name] == self.op.beparams[name]:
8574 del self.op.beparams[name]
8576 nic_defs = cluster.SimpleFillNIC({})
8577 for nic in self.op.nics:
8578 for name in constants.NICS_PARAMETERS:
8579 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
8582 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
8583 for name in self.op.osparams.keys():
8584 if name in os_defs and os_defs[name] == self.op.osparams[name]:
8585 del self.op.osparams[name]
8587 def _CalculateFileStorageDir(self):
8588 """Calculate final instance file storage dir.
8591 # file storage dir calculation/check
8592 self.instance_file_storage_dir = None
8593 if self.op.disk_template in constants.DTS_FILEBASED:
8594 # build the full file storage dir path
8597 if self.op.disk_template == constants.DT_SHARED_FILE:
8598 get_fsd_fn = self.cfg.GetSharedFileStorageDir
8600 get_fsd_fn = self.cfg.GetFileStorageDir
8602 cfg_storagedir = get_fsd_fn()
8603 if not cfg_storagedir:
8604 raise errors.OpPrereqError("Cluster file storage dir not defined")
8605 joinargs.append(cfg_storagedir)
8607 if self.op.file_storage_dir is not None:
8608 joinargs.append(self.op.file_storage_dir)
8610 joinargs.append(self.op.instance_name)
8612 # pylint: disable=W0142
8613 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
8615 def CheckPrereq(self):
8616 """Check prerequisites.
8619 self._CalculateFileStorageDir()
8621 if self.op.mode == constants.INSTANCE_IMPORT:
8622 export_info = self._ReadExportInfo()
8623 self._ReadExportParams(export_info)
8625 if (not self.cfg.GetVGName() and
8626 self.op.disk_template not in constants.DTS_NOT_LVM):
8627 raise errors.OpPrereqError("Cluster does not support lvm-based"
8628 " instances", errors.ECODE_STATE)
8630 if (self.op.hypervisor is None or
8631 self.op.hypervisor == constants.VALUE_AUTO):
8632 self.op.hypervisor = self.cfg.GetHypervisorType()
8634 cluster = self.cfg.GetClusterInfo()
8635 enabled_hvs = cluster.enabled_hypervisors
8636 if self.op.hypervisor not in enabled_hvs:
8637 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
8638 " cluster (%s)" % (self.op.hypervisor,
8639 ",".join(enabled_hvs)),
8642 # Check tag validity
8643 for tag in self.op.tags:
8644 objects.TaggableObject.ValidateTag(tag)
8646 # check hypervisor parameter syntax (locally)
8647 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
8648 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
8650 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
8651 hv_type.CheckParameterSyntax(filled_hvp)
8652 self.hv_full = filled_hvp
8653 # check that we don't specify global parameters on an instance
8654 _CheckGlobalHvParams(self.op.hvparams)
8656 # fill and remember the beparams dict
8657 default_beparams = cluster.beparams[constants.PP_DEFAULT]
8658 for param, value in self.op.beparams.iteritems():
8659 if value == constants.VALUE_AUTO:
8660 self.op.beparams[param] = default_beparams[param]
8661 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
8662 self.be_full = cluster.SimpleFillBE(self.op.beparams)
8664 # build os parameters
8665 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
8667 # now that hvp/bep are in final format, let's reset to defaults,
8669 if self.op.identify_defaults:
8670 self._RevertToDefaults(cluster)
8674 for idx, nic in enumerate(self.op.nics):
8675 nic_mode_req = nic.get(constants.INIC_MODE, None)
8676 nic_mode = nic_mode_req
8677 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
8678 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
8680 # in routed mode, for the first nic, the default ip is 'auto'
8681 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
8682 default_ip_mode = constants.VALUE_AUTO
8684 default_ip_mode = constants.VALUE_NONE
8686 # ip validity checks
8687 ip = nic.get(constants.INIC_IP, default_ip_mode)
8688 if ip is None or ip.lower() == constants.VALUE_NONE:
8690 elif ip.lower() == constants.VALUE_AUTO:
8691 if not self.op.name_check:
8692 raise errors.OpPrereqError("IP address set to auto but name checks"
8693 " have been skipped",
8695 nic_ip = self.hostname1.ip
8697 if not netutils.IPAddress.IsValid(ip):
8698 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
8702 # TODO: check the ip address for uniqueness
8703 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
8704 raise errors.OpPrereqError("Routed nic mode requires an ip address",
8707 # MAC address verification
8708 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
8709 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8710 mac = utils.NormalizeAndValidateMac(mac)
8713 self.cfg.ReserveMAC(mac, self.proc.GetECId())
8714 except errors.ReservationError:
8715 raise errors.OpPrereqError("MAC address %s already in use"
8716 " in cluster" % mac,
8717 errors.ECODE_NOTUNIQUE)
8719 # Build nic parameters
8720 link = nic.get(constants.INIC_LINK, None)
8721 if link == constants.VALUE_AUTO:
8722 link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
8725 nicparams[constants.NIC_MODE] = nic_mode
8727 nicparams[constants.NIC_LINK] = link
8729 check_params = cluster.SimpleFillNIC(nicparams)
8730 objects.NIC.CheckParameterSyntax(check_params)
8731 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
8733 # disk checks/pre-build
8734 default_vg = self.cfg.GetVGName()
8736 for disk in self.op.disks:
8737 mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
8738 if mode not in constants.DISK_ACCESS_SET:
8739 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
8740 mode, errors.ECODE_INVAL)
8741 size = disk.get(constants.IDISK_SIZE, None)
8743 raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
8746 except (TypeError, ValueError):
8747 raise errors.OpPrereqError("Invalid disk size '%s'" % size,
8750 data_vg = disk.get(constants.IDISK_VG, default_vg)
8752 constants.IDISK_SIZE: size,
8753 constants.IDISK_MODE: mode,
8754 constants.IDISK_VG: data_vg,
8755 constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
8757 if constants.IDISK_ADOPT in disk:
8758 new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
8759 self.disks.append(new_disk)
8761 if self.op.mode == constants.INSTANCE_IMPORT:
8763 for idx in range(len(self.disks)):
8764 option = "disk%d_dump" % idx
8765 if export_info.has_option(constants.INISECT_INS, option):
8766 # FIXME: are the old os-es, disk sizes, etc. useful?
8767 export_name = export_info.get(constants.INISECT_INS, option)
8768 image = utils.PathJoin(self.op.src_path, export_name)
8769 disk_images.append(image)
8771 disk_images.append(False)
8773 self.src_images = disk_images
8775 old_name = export_info.get(constants.INISECT_INS, "name")
8776 if self.op.instance_name == old_name:
8777 for idx, nic in enumerate(self.nics):
8778 if nic.mac == constants.VALUE_AUTO:
8779 nic_mac_ini = "nic%d_mac" % idx
8780 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
8782 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
8784 # ip ping checks (we use the same ip that was resolved in ExpandNames)
8785 if self.op.ip_check:
8786 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
8787 raise errors.OpPrereqError("IP %s of instance %s already in use" %
8788 (self.check_ip, self.op.instance_name),
8789 errors.ECODE_NOTUNIQUE)
8791 #### mac address generation
8792 # By generating here the mac address both the allocator and the hooks get
8793 # the real final mac address rather than the 'auto' or 'generate' value.
8794 # There is a race condition between the generation and the instance object
8795 # creation, which means that we know the mac is valid now, but we're not
8796 # sure it will be when we actually add the instance. If things go bad
8797 # adding the instance will abort because of a duplicate mac, and the
8798 # creation job will fail.
8799 for nic in self.nics:
8800 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8801 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
8805 if self.op.iallocator is not None:
8806 self._RunAllocator()
8808 #### node related checks
8810 # check primary node
8811 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
8812 assert self.pnode is not None, \
8813 "Cannot retrieve locked node %s" % self.op.pnode
8815 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
8816 pnode.name, errors.ECODE_STATE)
8818 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
8819 pnode.name, errors.ECODE_STATE)
8820 if not pnode.vm_capable:
8821 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
8822 " '%s'" % pnode.name, errors.ECODE_STATE)
8824 self.secondaries = []
8826 # mirror node verification
8827 if self.op.disk_template in constants.DTS_INT_MIRROR:
8828 if self.op.snode == pnode.name:
8829 raise errors.OpPrereqError("The secondary node cannot be the"
8830 " primary node", errors.ECODE_INVAL)
8831 _CheckNodeOnline(self, self.op.snode)
8832 _CheckNodeNotDrained(self, self.op.snode)
8833 _CheckNodeVmCapable(self, self.op.snode)
8834 self.secondaries.append(self.op.snode)
8836 nodenames = [pnode.name] + self.secondaries
8838 if not self.adopt_disks:
8839 # Check lv size requirements, if not adopting
8840 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
8841 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
8843 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
8844 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
8845 disk[constants.IDISK_ADOPT])
8846 for disk in self.disks])
8847 if len(all_lvs) != len(self.disks):
8848 raise errors.OpPrereqError("Duplicate volume names given for adoption",
8850 for lv_name in all_lvs:
8852 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
8853 # to ReserveLV uses the same syntax
8854 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
8855 except errors.ReservationError:
8856 raise errors.OpPrereqError("LV named %s used by another instance" %
8857 lv_name, errors.ECODE_NOTUNIQUE)
8859 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
8860 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
8862 node_lvs = self.rpc.call_lv_list([pnode.name],
8863 vg_names.payload.keys())[pnode.name]
8864 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
8865 node_lvs = node_lvs.payload
8867 delta = all_lvs.difference(node_lvs.keys())
8869 raise errors.OpPrereqError("Missing logical volume(s): %s" %
8870 utils.CommaJoin(delta),
8872 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
8874 raise errors.OpPrereqError("Online logical volumes found, cannot"
8875 " adopt: %s" % utils.CommaJoin(online_lvs),
8877 # update the size of disk based on what is found
8878 for dsk in self.disks:
8879 dsk[constants.IDISK_SIZE] = \
8880 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
8881 dsk[constants.IDISK_ADOPT])][0]))
8883 elif self.op.disk_template == constants.DT_BLOCK:
8884 # Normalize and de-duplicate device paths
8885 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
8886 for disk in self.disks])
8887 if len(all_disks) != len(self.disks):
8888 raise errors.OpPrereqError("Duplicate disk names given for adoption",
8890 baddisks = [d for d in all_disks
8891 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
8893 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
8894 " cannot be adopted" %
8895 (", ".join(baddisks),
8896 constants.ADOPTABLE_BLOCKDEV_ROOT),
8899 node_disks = self.rpc.call_bdev_sizes([pnode.name],
8900 list(all_disks))[pnode.name]
8901 node_disks.Raise("Cannot get block device information from node %s" %
8903 node_disks = node_disks.payload
8904 delta = all_disks.difference(node_disks.keys())
8906 raise errors.OpPrereqError("Missing block device(s): %s" %
8907 utils.CommaJoin(delta),
8909 for dsk in self.disks:
8910 dsk[constants.IDISK_SIZE] = \
8911 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
8913 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
8915 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
8916 # check OS parameters (remotely)
8917 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
8919 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
8921 # memory check on primary node
8923 _CheckNodeFreeMemory(self, self.pnode.name,
8924 "creating instance %s" % self.op.instance_name,
8925 self.be_full[constants.BE_MEMORY],
8928 self.dry_run_result = list(nodenames)
8930 def Exec(self, feedback_fn):
8931 """Create and add the instance to the cluster.
8934 instance = self.op.instance_name
8935 pnode_name = self.pnode.name
8937 ht_kind = self.op.hypervisor
8938 if ht_kind in constants.HTS_REQ_PORT:
8939 network_port = self.cfg.AllocatePort()
8943 disks = _GenerateDiskTemplate(self,
8944 self.op.disk_template,
8945 instance, pnode_name,
8948 self.instance_file_storage_dir,
8949 self.op.file_driver,
8953 iobj = objects.Instance(name=instance, os=self.op.os_type,
8954 primary_node=pnode_name,
8955 nics=self.nics, disks=disks,
8956 disk_template=self.op.disk_template,
8958 network_port=network_port,
8959 beparams=self.op.beparams,
8960 hvparams=self.op.hvparams,
8961 hypervisor=self.op.hypervisor,
8962 osparams=self.op.osparams,
8966 for tag in self.op.tags:
8969 if self.adopt_disks:
8970 if self.op.disk_template == constants.DT_PLAIN:
8971 # rename LVs to the newly-generated names; we need to construct
8972 # 'fake' LV disks with the old data, plus the new unique_id
8973 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
8975 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
8976 rename_to.append(t_dsk.logical_id)
8977 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
8978 self.cfg.SetDiskID(t_dsk, pnode_name)
8979 result = self.rpc.call_blockdev_rename(pnode_name,
8980 zip(tmp_disks, rename_to))
8981 result.Raise("Failed to rename adoped LVs")
8983 feedback_fn("* creating instance disks...")
8985 _CreateDisks(self, iobj)
8986 except errors.OpExecError:
8987 self.LogWarning("Device creation failed, reverting...")
8989 _RemoveDisks(self, iobj)
8991 self.cfg.ReleaseDRBDMinors(instance)
8994 feedback_fn("adding instance %s to cluster config" % instance)
8996 self.cfg.AddInstance(iobj, self.proc.GetECId())
8998 # Declare that we don't want to remove the instance lock anymore, as we've
8999 # added the instance to the config
9000 del self.remove_locks[locking.LEVEL_INSTANCE]
9002 if self.op.mode == constants.INSTANCE_IMPORT:
9003 # Release unused nodes
9004 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
9007 _ReleaseLocks(self, locking.LEVEL_NODE)
9010 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
9011 feedback_fn("* wiping instance disks...")
9013 _WipeDisks(self, iobj)
9014 except errors.OpExecError, err:
9015 logging.exception("Wiping disks failed")
9016 self.LogWarning("Wiping instance disks failed (%s)", err)
9020 # Something is already wrong with the disks, don't do anything else
9022 elif self.op.wait_for_sync:
9023 disk_abort = not _WaitForSync(self, iobj)
9024 elif iobj.disk_template in constants.DTS_INT_MIRROR:
9025 # make sure the disks are not degraded (still sync-ing is ok)
9026 feedback_fn("* checking mirrors status")
9027 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
9032 _RemoveDisks(self, iobj)
9033 self.cfg.RemoveInstance(iobj.name)
9034 # Make sure the instance lock gets removed
9035 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
9036 raise errors.OpExecError("There are some degraded disks for"
9039 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
9040 if self.op.mode == constants.INSTANCE_CREATE:
9041 if not self.op.no_install:
9042 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
9043 not self.op.wait_for_sync)
9045 feedback_fn("* pausing disk sync to install instance OS")
9046 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
9048 for idx, success in enumerate(result.payload):
9050 logging.warn("pause-sync of instance %s for disk %d failed",
9053 feedback_fn("* running the instance OS create scripts...")
9054 # FIXME: pass debug option from opcode to backend
9056 self.rpc.call_instance_os_add(pnode_name, iobj, False,
9057 self.op.debug_level)
9059 feedback_fn("* resuming disk sync")
9060 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
9062 for idx, success in enumerate(result.payload):
9064 logging.warn("resume-sync of instance %s for disk %d failed",
9067 os_add_result.Raise("Could not add os for instance %s"
9068 " on node %s" % (instance, pnode_name))
9070 elif self.op.mode == constants.INSTANCE_IMPORT:
9071 feedback_fn("* running the instance OS import scripts...")
9075 for idx, image in enumerate(self.src_images):
9079 # FIXME: pass debug option from opcode to backend
9080 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
9081 constants.IEIO_FILE, (image, ),
9082 constants.IEIO_SCRIPT,
9083 (iobj.disks[idx], idx),
9085 transfers.append(dt)
9088 masterd.instance.TransferInstanceData(self, feedback_fn,
9089 self.op.src_node, pnode_name,
9090 self.pnode.secondary_ip,
9092 if not compat.all(import_result):
9093 self.LogWarning("Some disks for instance %s on node %s were not"
9094 " imported successfully" % (instance, pnode_name))
9096 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9097 feedback_fn("* preparing remote import...")
9098 # The source cluster will stop the instance before attempting to make a
9099 # connection. In some cases stopping an instance can take a long time,
9100 # hence the shutdown timeout is added to the connection timeout.
9101 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
9102 self.op.source_shutdown_timeout)
9103 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
9105 assert iobj.primary_node == self.pnode.name
9107 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
9108 self.source_x509_ca,
9109 self._cds, timeouts)
9110 if not compat.all(disk_results):
9111 # TODO: Should the instance still be started, even if some disks
9112 # failed to import (valid for local imports, too)?
9113 self.LogWarning("Some disks for instance %s on node %s were not"
9114 " imported successfully" % (instance, pnode_name))
9116 # Run rename script on newly imported instance
9117 assert iobj.name == instance
9118 feedback_fn("Running rename script for %s" % instance)
9119 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
9120 self.source_instance_name,
9121 self.op.debug_level)
9123 self.LogWarning("Failed to run rename script for %s on node"
9124 " %s: %s" % (instance, pnode_name, result.fail_msg))
9127 # also checked in the prereq part
9128 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
9132 iobj.admin_up = True
9133 self.cfg.Update(iobj, feedback_fn)
9134 logging.info("Starting instance %s on node %s", instance, pnode_name)
9135 feedback_fn("* starting instance...")
9136 result = self.rpc.call_instance_start(pnode_name, iobj,
9138 result.Raise("Could not start instance")
9140 return list(iobj.all_nodes)
9143 class LUInstanceConsole(NoHooksLU):
9144 """Connect to an instance's console.
9146 This is somewhat special in that it returns the command line that
you need to run on the master node in order to connect to the
console.
9153 def ExpandNames(self):
9154 self._ExpandAndLockInstance()
9156 def CheckPrereq(self):
9157 """Check prerequisites.
9159 This checks that the instance is in the cluster.
9162 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9163 assert self.instance is not None, \
9164 "Cannot retrieve locked instance %s" % self.op.instance_name
9165 _CheckNodeOnline(self, self.instance.primary_node)
9167 def Exec(self, feedback_fn):
9168 """Connect to the console of an instance
9171 instance = self.instance
9172 node = instance.primary_node
9174 node_insts = self.rpc.call_instance_list([node],
9175 [instance.hypervisor])[node]
9176 node_insts.Raise("Can't get node information from %s" % node)
9178 if instance.name not in node_insts.payload:
9179 if instance.admin_up:
9180 state = constants.INSTST_ERRORDOWN
9182 state = constants.INSTST_ADMINDOWN
9183 raise errors.OpExecError("Instance %s is not running (state %s)" %
9184 (instance.name, state))
9186 logging.debug("Connecting to console of %s on %s", instance.name, node)
9188 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
9191 def _GetInstanceConsole(cluster, instance):
9192 """Returns console information for an instance.
9194 @type cluster: L{objects.Cluster}
9195 @type instance: L{objects.Instance}
9199 hyper = hypervisor.GetHypervisor(instance.hypervisor)
9200 # beparams and hvparams are passed separately, to avoid editing the
9201 # instance and then saving the defaults in the instance itself.
9202 hvparams = cluster.FillHV(instance)
9203 beparams = cluster.FillBE(instance)
9204 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
9206 assert console.instance == instance.name
9207 assert console.Validate()
9209 return console.ToDict()
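# Minimal usage sketch (assumed, not part of the original module):
#   cluster = lu.cfg.GetClusterInfo()
#   instance = lu.cfg.GetInstanceInfo("web1.example.com")  # hypothetical name
#   console_dict = _GetInstanceConsole(cluster, instance)
# The dict is the serialized objects.InstanceConsole, suitable for returning
# to clients such as "gnt-instance console".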
9212 class LUInstanceReplaceDisks(LogicalUnit):
9213 """Replace the disks of an instance.
9216 HPATH = "mirrors-replace"
9217 HTYPE = constants.HTYPE_INSTANCE
9220 def CheckArguments(self):
9221 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
9224 def ExpandNames(self):
9225 self._ExpandAndLockInstance()
9227 assert locking.LEVEL_NODE not in self.needed_locks
9228 assert locking.LEVEL_NODEGROUP not in self.needed_locks
9230 assert self.op.iallocator is None or self.op.remote_node is None, \
9231 "Conflicting options"
9233 if self.op.remote_node is not None:
9234 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9236 # Warning: do not remove the locking of the new secondary here
9237 # unless DRBD8.AddChildren is changed to work in parallel;
9238 # currently it doesn't since parallel invocations of
9239 # FindUnusedMinor will conflict
9240 self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
9241 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
9243 self.needed_locks[locking.LEVEL_NODE] = []
9244 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9246 if self.op.iallocator is not None:
9247 # iallocator will select a new node in the same group
9248 self.needed_locks[locking.LEVEL_NODEGROUP] = []
9250 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
9251 self.op.iallocator, self.op.remote_node,
9252 self.op.disks, False, self.op.early_release)
9254 self.tasklets = [self.replacer]
9256 def DeclareLocks(self, level):
9257 if level == locking.LEVEL_NODEGROUP:
9258 assert self.op.remote_node is None
9259 assert self.op.iallocator is not None
9260 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
9262 self.share_locks[locking.LEVEL_NODEGROUP] = 1
9263 self.needed_locks[locking.LEVEL_NODEGROUP] = \
9264 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
9266 elif level == locking.LEVEL_NODE:
9267 if self.op.iallocator is not None:
9268 assert self.op.remote_node is None
9269 assert not self.needed_locks[locking.LEVEL_NODE]
9271 # Lock member nodes of all locked groups
9272 self.needed_locks[locking.LEVEL_NODE] = [node_name
9273 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
9274 for node_name in self.cfg.GetNodeGroup(group_uuid).members]
9276 self._LockInstancesNodes()
9278 def BuildHooksEnv(self):
9281 This runs on the master, the primary and all the secondaries.
9284 instance = self.replacer.instance
9286 "MODE": self.op.mode,
9287 "NEW_SECONDARY": self.op.remote_node,
9288 "OLD_SECONDARY": instance.secondary_nodes[0],
9290 env.update(_BuildInstanceHookEnvByObject(self, instance))
9293 def BuildHooksNodes(self):
9294 """Build hooks nodes.
9297 instance = self.replacer.instance
9299 self.cfg.GetMasterNode(),
9300 instance.primary_node,
9302 if self.op.remote_node is not None:
9303 nl.append(self.op.remote_node)
  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
            self.op.iallocator is None)

    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)

    return LogicalUnit.CheckPrereq(self)
9320 class TLReplaceDisks(Tasklet):
9321 """Replaces disks for an instance.
  Note: Locking is not within the scope of this class.

  """
9326 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
9327 disks, delay_iallocator, early_release):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node = remote_node
    self.disks = disks
    self.delay_iallocator = delay_iallocator
    self.early_release = early_release

    # Runtime data
    self.instance = None
    self.new_node = None
    self.target_node = None
    self.other_node = None
    self.remote_node_info = None
    self.node_secondary_ip = None
  @staticmethod
  def CheckArguments(mode, remote_node, iallocator):
    """Helper function for users of this class.

    """
    # check for valid parameter combination
9356 if mode == constants.REPLACE_DISK_CHG:
9357 if remote_node is None and iallocator is None:
9358 raise errors.OpPrereqError("When changing the secondary either an"
9359 " iallocator script must be used or the"
9360 " new node given", errors.ECODE_INVAL)
9362 if remote_node is not None and iallocator is not None:
9363 raise errors.OpPrereqError("Give either the iallocator or the new"
9364 " secondary, not both", errors.ECODE_INVAL)
9366 elif remote_node is not None or iallocator is not None:
9367 # Not replacing the secondary
9368 raise errors.OpPrereqError("The iallocator and new node options can"
9369 " only be used when changing the"
9370 " secondary node", errors.ECODE_INVAL)
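
  # Examples of the combinations accepted by CheckArguments above (sketch
  # only; node and allocator names are made up):
  #   CheckArguments(constants.REPLACE_DISK_PRI, None, None)                 # ok
  #   CheckArguments(constants.REPLACE_DISK_CHG, "node3.example.com", None)  # ok
  #   CheckArguments(constants.REPLACE_DISK_CHG, None, "hail")               # ok
  #   CheckArguments(constants.REPLACE_DISK_CHG, None, None)                 # raises
  #   CheckArguments(constants.REPLACE_DISK_PRI, "node3.example.com", None)  # raises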
  @staticmethod
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(lu.cfg, lu.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=instance_name,
                     relocate_from=list(relocate_from))

    ial.Run(iallocator_name)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (iallocator_name, ial.info),
                                 errors.ECODE_NORES)

    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (iallocator_name,
                                  len(ial.result), ial.required_nodes),
                                 errors.ECODE_FAULT)

    remote_node_name = ial.result[0]

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

    return remote_node_name
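
  # For IALLOCATOR_MODE_RELOC the allocator is expected to return exactly one
  # node name, e.g. ial.result == ["node4.example.com"] (illustrative value),
  # which becomes the new secondary and is checked against the held node
  # locks by the caller.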
  def _FindFaultyDisks(self, node_name):
    """Wrapper for L{_FindFaultyInstanceDisks}.

    """
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                    node_name, True)
  def _CheckDisksActivated(self, instance):
    """Checks if the instance disks are activated.

    @param instance: The instance to check disks
    @return: True if they are activated, False otherwise

    """
    nodes = instance.all_nodes
    for idx, dev in enumerate(instance.disks):
      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
        self.cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        if result.offline:
          continue
        elif result.fail_msg or not result.payload:
          return False

    return True
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
9440 assert instance is not None, \
9441 "Cannot retrieve locked instance %s" % self.instance_name
9443 if instance.disk_template != constants.DT_DRBD8:
9444 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
9445 " instances", errors.ECODE_INVAL)
9447 if len(instance.secondary_nodes) != 1:
9448 raise errors.OpPrereqError("The instance has a strange layout,"
9449 " expected one secondary but found %d" %
9450 len(instance.secondary_nodes),
9453 if not self.delay_iallocator:
9454 self._CheckPrereq2()
9456 def _CheckPrereq2(self):
9457 """Check prerequisites, second part.
9459 This function should always be part of CheckPrereq. It was separated and is
9460 now called from Exec because during node evacuation iallocator was only
    called with an unmodified cluster model, not taking planned changes into
    account.

    """
    instance = self.instance
9466 secondary_node = instance.secondary_nodes[0]
9468 if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)
9474 if remote_node is None:
      self.remote_node_info = None
    else:
      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
9478 "Remote node '%s' is not locked" % remote_node
9480 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
9481 assert self.remote_node_info is not None, \
9482 "Cannot retrieve locked node %s" % remote_node
9484 if remote_node == self.instance.primary_node:
9485 raise errors.OpPrereqError("The specified node is the primary node of"
9486 " the instance", errors.ECODE_INVAL)
9488 if remote_node == secondary_node:
9489 raise errors.OpPrereqError("The specified node is already the"
9490 " secondary node of the instance",
9493 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
9494 constants.REPLACE_DISK_CHG):
9495 raise errors.OpPrereqError("Cannot specify disks to be replaced",
9498 if self.mode == constants.REPLACE_DISK_AUTO:
9499 if not self._CheckDisksActivated(instance):
9500 raise errors.OpPrereqError("Please run activate-disks on instance %s"
9501 " first" % self.instance_name,
9503 faulty_primary = self._FindFaultyDisks(instance.primary_node)
9504 faulty_secondary = self._FindFaultyDisks(secondary_node)
      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
9514 self.target_node = instance.primary_node
9515 self.other_node = secondary_node
9516 check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
9528 if self.mode == constants.REPLACE_DISK_PRI:
9529 self.target_node = instance.primary_node
9530 self.other_node = secondary_node
9531 check_nodes = [self.target_node, self.other_node]
9533 elif self.mode == constants.REPLACE_DISK_SEC:
9534 self.target_node = secondary_node
9535 self.other_node = instance.primary_node
9536 check_nodes = [self.target_node, self.other_node]
9538 elif self.mode == constants.REPLACE_DISK_CHG:
9539 self.new_node = remote_node
9540 self.other_node = instance.primary_node
9541 self.target_node = secondary_node
9542 check_nodes = [self.new_node, self.other_node]
9544 _CheckNodeNotDrained(self.lu, remote_node)
9545 _CheckNodeVmCapable(self.lu, remote_node)
9547 old_node_info = self.cfg.GetNodeInfo(secondary_node)
9548 assert old_node_info is not None
9549 if old_node_info.offline and not self.early_release:
9550 # doesn't make sense to delay the release
9551 self.early_release = True
9552 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

      # If not specified all disks should be replaced
      if not self.disks:
        self.disks = range(len(self.instance.disks))
9563 for node in check_nodes:
9564 _CheckNodeOnline(self.lu, node)
    touched_nodes = frozenset(node_name for node_name in [self.new_node,
                                                          self.other_node,
                                                          self.target_node]
                              if node_name is not None)
9571 # Release unneeded node locks
9572 _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
9574 # Release any owned node group
9575 if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
9576 _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
9578 # Check whether disks are valid
9579 for disk_idx in self.disks:
9580 instance.FindDisk(disk_idx)
9582 # Get secondary node IP addresses
9583 self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
9584 in self.cfg.GetMultiNodeInfo(touched_nodes))
9586 def Exec(self, feedback_fn):
9587 """Execute disk replacement.
9589 This dispatches the disk replacement to the appropriate handler.
9592 if self.delay_iallocator:
9593 self._CheckPrereq2()
9596 # Verify owned locks before starting operation
9597 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
9598 assert set(owned_nodes) == set(self.node_secondary_ip), \
9599 ("Incorrect node locks, owning %s, expected %s" %
9600 (owned_nodes, self.node_secondary_ip.keys()))
9602 owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
9603 assert list(owned_instances) == [self.instance_name], \
9604 "Instance '%s' not locked" % self.instance_name
    assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
      "Should not own any node group lock at this point"

    if not self.disks:
      feedback_fn("No disks need replacement")
      return
9613 feedback_fn("Replacing disk(s) %s for %s" %
9614 (utils.CommaJoin(self.disks), self.instance.name))
9616 activate_disks = (not self.instance.admin_up)
    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly

      result = fn(feedback_fn)
    finally:
      # Deactivate the instance disks if we're replacing them on a
      # down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)
9637 # Verify owned locks
9638 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
9639 nodes = frozenset(self.node_secondary_ip)
    assert ((self.early_release and not owned_nodes) or
            (not self.early_release and not (set(owned_nodes) - nodes))), \
      ("Not owning the correct locks, early_release=%s, owned=%r,"
       " nodes=%r" % (self.early_release, owned_nodes, nodes))

    return result
9647 def _CheckVolumeGroup(self, nodes):
9648 self.lu.LogInfo("Checking volume groups")
9650 vgname = self.cfg.GetVGName()
9652 # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))
9664 def _CheckDisksExistence(self, nodes):
9665 # Check disk existence
9666 for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))
9683 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
9684 for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))
  def _CreateNewStorage(self, node_name):
    """Create new storage on the primary or secondary node.

    This is only used for same-node replaces, not for changing the
    secondary node, hence we don't want to modify the existing disk.

    """
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      vg_data = dev.children[0].logical_id[0]
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vg_data, names[0]))
      vg_meta = dev.children[1].logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vg_meta, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = [child.Copy() for child in dev.children]
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names
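
  # The mapping returned above has one entry per replaced disk, keyed by
  # iv_name, e.g. (sketch with made-up objects):
  #   {"disk/0": (drbd_dev, [old_data_lv, old_meta_lv],
  #               [new_data_lv, new_meta_lv])}
  # The callers below use it to rename/attach the new LVs and to remove the
  # old ones afterwards.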
9735 def _CheckDevices(self, node_name, iv_names):
9736 for name, (dev, _, _) in iv_names.iteritems():
9737 self.cfg.SetDiskID(dev, node_name)
9739 result = self.rpc.call_blockdev_find(node_name, dev)
9741 msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
9748 if result.payload.is_degraded:
9749 raise errors.OpExecError("DRBD device %s is degraded!" % name)
9751 def _RemoveOldStorage(self, node_name, iv_names):
9752 for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
9761 hint="remove unused LVs manually")
9763 def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
9764 """Replace a disk on the primary or secondary for DRBD 8.
9766 The algorithm for replace is quite complicated:
9768 1. for each disk to be replaced:
9770 1. create new LVs on the target node with unique names
9771 1. detach old LVs from the drbd device
9772 1. rename old LVs to name_replaced.<time_t>
9773 1. rename new LVs to old LVs
9774 1. attach the new LVs (with the old names now) to the drbd device
9776 1. wait for sync across all devices
9778 1. for each modified disk:
9780 1. remove old LVs (which have the name name_replaces.<time_t>)
    Failures are not very well handled.

    """
    steps_total = 6
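
    # Sketch of the per-disk detach/rename/attach dance performed below, with
    # made-up LV names (the real names come from _GenerateUniqueNames):
    #   old data LV  "disk0_data"        -> "disk0_data_replaced-<time_t>"
    #   new data LV  ".disk0_data_<id>"  -> "disk0_data"
    # after which the DRBD device is re-attached to the (renamed) new LVs.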
9787 # Step: check device activation
9788 self.lu.LogStep(1, steps_total, "Check device existence")
9789 self._CheckDisksExistence([self.other_node, self.target_node])
9790 self._CheckVolumeGroup([self.target_node, self.other_node])
9792 # Step: check other node consistency
9793 self.lu.LogStep(2, steps_total, "Check peer consistency")
9794 self._CheckDisksConsistency(self.other_node,
9795 self.other_node == self.instance.primary_node,
9798 # Step: create new storage
9799 self.lu.LogStep(3, steps_total, "Allocate new storage")
9800 iv_names = self._CreateNewStorage(self.target_node)
9802 # Step: for each lv, detach+rename*2+attach
9803 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
9804 for dev, old_lvs, new_lvs in iv_names.itervalues():
9805 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
9807 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
9809 result.Raise("Can't detach drbd from local storage on node"
9810 " %s for device %s" % (self.target_node, dev.iv_name))
9812 #cfg.Update(instance)
9814 # ok, we created the new LVs, so now we know we have the needed
9815 # storage; as such, we proceed on the target node to rename
9816 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
9817 # using the assumption that logical_id == physical_id (which in
9818 # turn is the unique_id on that node)
9820 # FIXME(iustin): use a better name for the replaced LVs
9821 temp_suffix = int(time.time())
9822 ren_fn = lambda d, suff: (d.physical_id[0],
9823 d.physical_id[1] + "_replaced-%s" % suff)
9825 # Build the rename list based on what LVs exist on the node
9826 rename_old_to_new = []
9827 for to_ren in old_lvs:
9828 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
9829 if not result.fail_msg and result.payload:
9831 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
9833 self.lu.LogInfo("Renaming the old LVs on the target node")
9834 result = self.rpc.call_blockdev_rename(self.target_node,
9836 result.Raise("Can't rename old LVs on node %s" % self.target_node)
9838 # Now we rename the new LVs to the old LVs
9839 self.lu.LogInfo("Renaming the new LVs on the target node")
9840 rename_new_to_old = [(new, old.physical_id)
9841 for old, new in zip(old_lvs, new_lvs)]
9842 result = self.rpc.call_blockdev_rename(self.target_node,
9844 result.Raise("Can't rename new LVs on node %s" % self.target_node)
9846 # Intermediate steps of in memory modifications
9847 for old, new in zip(old_lvs, new_lvs):
9848 new.logical_id = old.logical_id
9849 self.cfg.SetDiskID(new, self.target_node)
9851 # We need to modify old_lvs so that removal later removes the
9852 # right LVs, not the newly added ones; note that old_lvs is a
9854 for disk in old_lvs:
9855 disk.logical_id = ren_fn(disk, temp_suffix)
9856 self.cfg.SetDiskID(disk, self.target_node)
9858 # Now that the new lvs have the old name, we can add them to the device
9859 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
9860 result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
9862 msg = result.fail_msg
9864 for new_lv in new_lvs:
9865 msg2 = self.rpc.call_blockdev_remove(self.target_node,
9868 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
9869 hint=("cleanup manually the unused logical"
9871 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
9878 # WARNING: we release both node locks here, do not do other RPCs
9879 # than WaitForSync to the primary node
9880 _ReleaseLocks(self.lu, locking.LEVEL_NODE,
9881 names=[self.target_node, self.other_node])
9884 # This can fail as the old devices are degraded and _WaitForSync
9885 # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)
9890 # Check all devices manually
9891 self._CheckDevices(self.instance.primary_node, iv_names)
9893 # Step: remove old storage
9894 if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
9899 def _ExecDrbd8Secondary(self, feedback_fn):
9900 """Replace the secondary node for DRBD 8.
9902 The algorithm for replace is quite complicated:
9903 - for all disks of the instance:
9904 - create new LVs on the new node with same names
9905 - shutdown the drbd device on the old secondary
9906 - disconnect the drbd network on the primary
9907 - create the drbd device on the new secondary
9908 - network attach the drbd on the primary, using an artifice:
9909 the drbd code for Attach() will connect to the network if it
9910 finds a device which is connected to the good local disks but
9912 - wait for sync across all devices
9913 - remove all disks from the old secondary
    Failures are not very well handled.

    """
    steps_total = 6
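
    # Sketch of the logical_id rewrite done in step 4 below (values are
    # illustrative): a DRBD8 logical_id has the form
    #   (node_a, node_b, port, minor_a, minor_b, secret)
    # and only the old secondary's entries change, e.g.
    #   ("pnode", "old-sec", 11000, 0, 1, "secret")
    #     -> ("pnode", "new-sec", 11000, 0, <new_minor>, "secret")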
9920 pnode = self.instance.primary_node
9922 # Step: check device activation
9923 self.lu.LogStep(1, steps_total, "Check device existence")
9924 self._CheckDisksExistence([self.instance.primary_node])
9925 self._CheckVolumeGroup([self.instance.primary_node])
9927 # Step: check other node consistency
9928 self.lu.LogStep(2, steps_total, "Check peer consistency")
9929 self._CheckDisksConsistency(self.instance.primary_node, True, True)
9931 # Step: create new storage
9932 self.lu.LogStep(3, steps_total, "Allocate new storage")
9933 for idx, dev in enumerate(self.instance.disks):
9934 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
9935 (self.new_node, idx))
9936 # we pass force_create=True to force LVM creation
9937 for new_lv in dev.children:
9938 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
9939 _GetInstanceInfoText(self.instance), False)
9941 # Step 4: dbrd minors and drbd setups changes
9942 # after this, we must manually remove the drbd minors on both the
9943 # error and the success paths
9944 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
9951 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
9952 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
9953 (self.new_node, idx))
9954 # create new devices on new_node; note that we create two IDs:
9955 # one without port, so the drbd will be activated without
9956 # networking information on the new node at this stage, and one
9957 # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
9966 p_minor, new_minor, o_secret)
9967 new_net_id = (self.instance.primary_node, self.new_node, o_port,
9968 p_minor, new_minor, o_secret)
9970 iv_names[idx] = (dev, dev.children, new_net_id)
9971 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
9973 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
9974 logical_id=new_alone_id,
9975 children=dev.children,
9978 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
9979 _GetInstanceInfoText(self.instance), False)
9980 except errors.GenericError:
9981 self.cfg.ReleaseDRBDMinors(self.instance.name)
9984 # We have new devices, shutdown the drbd on the old secondary
9985 for idx, dev in enumerate(self.instance.disks):
9986 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
9987 self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
9992 hint=("Please cleanup this device manually as"
9993 " soon as possible"))
9995 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
9996 result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
9997 self.instance.disks)[pnode]
    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
10002 self.cfg.ReleaseDRBDMinors(self.instance.name)
10003 raise errors.OpExecError("Can't detach the disks from the network on"
10004 " old node: %s" % (msg,))
10006 # if we managed to detach at least one, we update all the disks of
10007 # the instance to point to the new secondary
10008 self.lu.LogInfo("Updating instance configuration")
10009 for dev, _, new_logical_id in iv_names.itervalues():
10010 dev.logical_id = new_logical_id
10011 self.cfg.SetDiskID(dev, self.instance.primary_node)
10013 self.cfg.Update(self.instance, feedback_fn)
10015 # and now perform the drbd attach
10016 self.lu.LogInfo("Attaching primary drbds to new secondary"
10017 " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
10024 for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.instance.primary_node,
                           self.target_node,
                           self.new_node])
10044 # This can fail as the old devices are degraded and _WaitForSync
10045 # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)
10050 # Check all devices manually
10051 self._CheckDevices(self.instance.primary_node, iv_names)
10053 # Step: remove old storage
10054 if not self.early_release:
10055 self.lu.LogStep(cstep, steps_total, "Removing old storage")
10056 self._RemoveOldStorage(self.target_node, iv_names)
10059 class LURepairNodeStorage(NoHooksLU):
10060 """Repairs the volume group on a node.
10065 def CheckArguments(self):
10066 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
10068 storage_type = self.op.storage_type
10070 if (constants.SO_FIX_CONSISTENCY not in
10071 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
10072 raise errors.OpPrereqError("Storage units of type '%s' can not be"
10073 " repaired" % storage_type,
10074 errors.ECODE_INVAL)
10076 def ExpandNames(self):
10077 self.needed_locks = {
10078 locking.LEVEL_NODE: [self.op.node_name],
10081 def _CheckFaultyDisks(self, instance, node_name):
10082 """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise err
  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
10104 check_nodes.discard(self.op.node_name)
10105 for inst_node_name in check_nodes:
10106 self._CheckFaultyDisks(inst, inst_node_name)
10108 def Exec(self, feedback_fn):
10109 feedback_fn("Repairing storage unit '%s' on %s ..." %
10110 (self.op.name, self.op.node_name))
10112 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
10117 result.Raise("Failed to repair storage unit '%s' on %s" %
10118 (self.op.name, self.op.node_name))
10121 class LUNodeEvacuate(NoHooksLU):
10122 """Evacuates instances off a list of nodes.
10127 def CheckArguments(self):
10128 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
10130 def ExpandNames(self):
10131 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
10133 if self.op.remote_node is not None:
10134 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10135 assert self.op.remote_node
10137 if self.op.remote_node == self.op.node_name:
10138 raise errors.OpPrereqError("Can not use evacuated node as a new"
10139 " secondary node", errors.ECODE_INVAL)
10141 if self.op.mode != constants.IALLOCATOR_NEVAC_SEC:
10142 raise errors.OpPrereqError("Without the use of an iallocator only"
10143 " secondary instances can be evacuated",
10144 errors.ECODE_INVAL)
10147 self.share_locks = _ShareAll()
10148 self.needed_locks = {
10149 locking.LEVEL_INSTANCE: [],
10150 locking.LEVEL_NODEGROUP: [],
10151 locking.LEVEL_NODE: [],
10154 if self.op.remote_node is None:
10155 # Iallocator will choose any node(s) in the same group
      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
    else:
      group_nodes = frozenset([self.op.remote_node])
10160 # Determine nodes to be locked
10161 self.lock_nodes = set([self.op.node_name]) | group_nodes
10163 def _DetermineInstances(self):
10164 """Builds list of instances to operate on.
10167 assert self.op.mode in constants.IALLOCATOR_NEVAC_MODES
10169 if self.op.mode == constants.IALLOCATOR_NEVAC_PRI:
10170 # Primary instances only
10171 inst_fn = _GetNodePrimaryInstances
10172 assert self.op.remote_node is None, \
10173 "Evacuating primary instances requires iallocator"
10174 elif self.op.mode == constants.IALLOCATOR_NEVAC_SEC:
10175 # Secondary instances only
      inst_fn = _GetNodeSecondaryInstances
    else:
      assert self.op.mode == constants.IALLOCATOR_NEVAC_ALL
10180 inst_fn = _GetNodeInstances
10182 return inst_fn(self.cfg, self.op.node_name)
10184 def DeclareLocks(self, level):
10185 if level == locking.LEVEL_INSTANCE:
10186 # Lock instances optimistically, needs verification once node and group
10187 # locks have been acquired
10188 self.needed_locks[locking.LEVEL_INSTANCE] = \
10189 set(i.name for i in self._DetermineInstances())
10191 elif level == locking.LEVEL_NODEGROUP:
10192 # Lock node groups optimistically, needs verification once nodes have
10194 self.needed_locks[locking.LEVEL_NODEGROUP] = \
10195 self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
10197 elif level == locking.LEVEL_NODE:
10198 self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
10200 def CheckPrereq(self):
10202 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
10203 owned_nodes = self.owned_locks(locking.LEVEL_NODE)
10204 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
10206 assert owned_nodes == self.lock_nodes
10208 wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
10209 if owned_groups != wanted_groups:
10210 raise errors.OpExecError("Node groups changed since locks were acquired,"
10211 " current groups are '%s', used to be '%s'" %
10212 (utils.CommaJoin(wanted_groups),
10213 utils.CommaJoin(owned_groups)))
10215 # Determine affected instances
10216 self.instances = self._DetermineInstances()
10217 self.instance_names = [i.name for i in self.instances]
10219 if set(self.instance_names) != owned_instances:
10220 raise errors.OpExecError("Instances on node '%s' changed since locks"
10221 " were acquired, current instances are '%s',"
10222 " used to be '%s'" %
10223 (self.op.node_name,
10224 utils.CommaJoin(self.instance_names),
10225 utils.CommaJoin(owned_instances)))
    if self.instance_names:
      self.LogInfo("Evacuating instances from node '%s': %s",
                   self.op.node_name,
                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
    else:
      self.LogInfo("No instances to evacuate from node '%s'",
                   self.op.node_name)
10235 if self.op.remote_node is not None:
10236 for i in self.instances:
10237 if i.primary_node == self.op.remote_node:
10238 raise errors.OpPrereqError("Node %s is the primary node of"
10239 " instance %s, cannot use it as"
10241 (self.op.remote_node, i.name),
10242 errors.ECODE_INVAL)
10244 def Exec(self, feedback_fn):
10245 assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
10247 if not self.instance_names:
      # No instances to evacuate
      jobs = []
10251 elif self.op.iallocator is not None:
10252 # TODO: Implement relocation to other group
10253 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
10254 evac_mode=self.op.mode,
10255 instances=list(self.instance_names))
10257 ial.Run(self.op.iallocator)
10259 if not ial.success:
10260 raise errors.OpPrereqError("Can't compute node evacuation using"
10261 " iallocator '%s': %s" %
10262 (self.op.iallocator, ial.info),
10263 errors.ECODE_NORES)
10265 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
10267 elif self.op.remote_node is not None:
      assert self.op.mode == constants.IALLOCATOR_NEVAC_SEC
      jobs = [
        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
                                        remote_node=self.op.remote_node,
                                        disks=[],
                                        mode=constants.REPLACE_DISK_CHG,
                                        early_release=self.op.early_release)]
        for instance_name in self.instance_names
        ]

    else:
      raise errors.ProgrammerError("No iallocator or remote node")
10281 return ResultWithJobs(jobs)
def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op
def _NodeEvacDest(use_nodes, group, nodes):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(nodes)
  else:
    return group
10306 def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
10307 """Unpacks the result of change-group and node-evacuate iallocator requests.
10309 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
10310 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
10312 @type lu: L{LogicalUnit}
10313 @param lu: Logical unit instance
10314 @type alloc_result: tuple/list
10315 @param alloc_result: Result from iallocator
10316 @type early_release: bool
10317 @param early_release: Whether to release locks early if possible
10318 @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    lu.LogWarning("Unable to evacuate instances %s",
                  utils.CommaJoin("%s (%s)" % (name, reason)
                                  for (name, reason) in failed))

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin("%s (to %s)" %
                               (name, _NodeEvacDest(use_nodes, group, nodes))
                               for (name, group, nodes) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]
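
# Sketch of the structure unpacked above (illustrative values): for a
# node-evacuate request the allocator answer looks roughly like
#   alloc_result = (
#     [("inst1", "group1", ["node2", "node3"])],       # moved
#     [("inst2", "not enough memory")],                # failed
#     [[{"OP_ID": "OP_INSTANCE_REPLACE_DISKS", ...}]], # jobs (opcode dicts)
#     )
# and each inner opcode list becomes one submitted job.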
10340 class LUInstanceGrowDisk(LogicalUnit):
10341 """Grow a disk of an instance.
10344 HPATH = "disk-grow"
10345 HTYPE = constants.HTYPE_INSTANCE
10348 def ExpandNames(self):
10349 self._ExpandAndLockInstance()
10350 self.needed_locks[locking.LEVEL_NODE] = []
10351 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10353 def DeclareLocks(self, level):
10354 if level == locking.LEVEL_NODE:
10355 self._LockInstancesNodes()
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env
10370 def BuildHooksNodes(self):
10371 """Build hooks nodes.
10374 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
10377 def CheckPrereq(self):
10378 """Check prerequisites.
10380 This checks that the instance is in the cluster.
10383 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10384 assert instance is not None, \
10385 "Cannot retrieve locked instance %s" % self.op.instance_name
10386 nodenames = list(instance.all_nodes)
10387 for node in nodenames:
10388 _CheckNodeOnline(self, node)
10390 self.instance = instance
10392 if instance.disk_template not in constants.DTS_GROWABLE:
10393 raise errors.OpPrereqError("Instance's disk layout does not support"
10394 " growing", errors.ECODE_INVAL)
10396 self.disk = instance.FindDisk(self.op.disk)
10398 if instance.disk_template not in (constants.DT_FILE,
10399 constants.DT_SHARED_FILE):
10400 # TODO: check the free disk space for file, when that feature will be
10402 _CheckNodesFreeDiskPerVG(self, nodenames,
10403 self.disk.ComputeGrowth(self.op.amount))
  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")
10416 # First run all grow ops in dry-run mode
10417 for node in instance.all_nodes:
10418 self.cfg.SetDiskID(disk, node)
10419 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
10420 result.Raise("Grow request failed to node %s" % node)
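
    # Sketch: the first pass above calls blockdev_grow with the dry-run flag
    # set, so each node only checks that the device could be grown by
    # self.op.amount; nothing is resized until the second pass below repeats
    # the calls with the flag cleared.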
10422 # We know that (as far as we can test) operations across different
10423 # nodes will succeed, time to run it for real
10424 for node in instance.all_nodes:
10425 self.cfg.SetDiskID(disk, node)
10426 result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
10427 result.Raise("Grow request failed to node %s" % node)
    # TODO: Rewrite code to work properly
    # DRBD goes into sync mode for a short amount of time after executing the
    # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
    # calling "resize" in sync mode fails. Sleeping for a short amount of
    # time is a work-around.
    time.sleep(5)
10436 disk.RecordGrow(self.op.amount)
10437 self.cfg.Update(instance, feedback_fn)
10438 if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Disk sync-ing has not returned a good"
10442 " status; please check the instance")
10443 if not instance.admin_up:
10444 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
10445 elif not instance.admin_up:
10446 self.proc.LogWarning("Not shutting down the disk even if the instance is"
10447 " not supposed to be running because no wait for"
10448 " sync mode was requested")
10451 class LUInstanceQueryData(NoHooksLU):
10452 """Query runtime instance data.
10457 def ExpandNames(self):
10458 self.needed_locks = {}
10460 # Use locking if requested or when non-static information is wanted
10461 if not (self.op.static or self.op.use_locking):
10462 self.LogWarning("Non-static data requested, locks need to be acquired")
10463 self.op.use_locking = True
10465 if self.op.instances or not self.op.use_locking:
10466 # Expand instance names right here
10467 self.wanted_names = _GetWantedInstances(self, self.op.instances)
10469 # Will use acquired locks
10470 self.wanted_names = None
10472 if self.op.use_locking:
10473 self.share_locks = _ShareAll()
10475 if self.wanted_names is None:
10476 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
10478 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
10480 self.needed_locks[locking.LEVEL_NODE] = []
10481 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10483 def DeclareLocks(self, level):
10484 if self.op.use_locking and level == locking.LEVEL_NODE:
10485 self._LockInstancesNodes()
10487 def CheckPrereq(self):
10488 """Check prerequisites.
10490 This only checks the optional instance list against the existing names.
10493 if self.wanted_names is None:
10494 assert self.op.use_locking, "Locking was not used"
10495 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
10497 self.wanted_instances = \
10498 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)
    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)
    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)
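
  # The tuple above mirrors the fields of the remote block device status,
  # e.g. (illustrative values only):
  #   ("/dev/drbd0", 147, 0, 95.2, 10, False, constants.LDS_OKAY)
  #   = (dev_path, major, minor, sync_percent, estimated_time,
  #      is_degraded, ldisk_status)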
10523 def _ComputeDiskStatus(self, instance, snode, dev):
10524 """Compute block device status.
10527 if dev.dev_type in constants.LDS_DRBD:
10528 # we change the snode then (otherwise we use the one passed in)
10529 if dev.logical_id[0] == instance.primary_node:
10530 snode = dev.logical_id[1]
10532 snode = dev.logical_id[0]
10534 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
10535 instance.name, dev)
10536 dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
10539 dev_children = map(compat.partial(self._ComputeDiskStatus,
10546 "iv_name": dev.iv_name,
10547 "dev_type": dev.dev_type,
10548 "logical_id": dev.logical_id,
10549 "physical_id": dev.physical_id,
10550 "pstatus": dev_pstatus,
10551 "sstatus": dev_sstatus,
10552 "children": dev_children,
10557 def Exec(self, feedback_fn):
10558 """Gather and return data"""
10561 cluster = self.cfg.GetClusterInfo()
10563 pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
10564 for i in self.wanted_instances)
10565 for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
10566 if self.op.static or pnode.offline:
10567 remote_state = None
10569 self.LogWarning("Primary node %s is marked offline, returning static"
10570 " information only for instance %s" %
10571 (pnode.name, instance.name))
10573 remote_info = self.rpc.call_instance_info(instance.primary_node,
10575 instance.hypervisor)
10576 remote_info.Raise("Error checking node %s" % instance.primary_node)
10577 remote_info = remote_info.payload
10578 if remote_info and "state" in remote_info:
10579 remote_state = "up"
10581 remote_state = "down"
10583 if instance.admin_up:
10584 config_state = "up"
10586 config_state = "down"
10588 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
10591 result[instance.name] = {
10592 "name": instance.name,
10593 "config_state": config_state,
10594 "run_state": remote_state,
10595 "pnode": instance.primary_node,
10596 "snodes": instance.secondary_nodes,
10598 # this happens to be the same format used for hooks
10599 "nics": _NICListToTuple(self, instance.nics),
10600 "disk_template": instance.disk_template,
10602 "hypervisor": instance.hypervisor,
10603 "network_port": instance.network_port,
10604 "hv_instance": instance.hvparams,
10605 "hv_actual": cluster.FillHV(instance, skip_globals=True),
10606 "be_instance": instance.beparams,
10607 "be_actual": cluster.FillBE(instance),
10608 "os_instance": instance.osparams,
10609 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
10610 "serial_no": instance.serial_no,
10611 "mtime": instance.mtime,
10612 "ctime": instance.ctime,
10613 "uuid": instance.uuid,
10619 class LUInstanceSetParams(LogicalUnit):
10620 """Modifies an instances's parameters.
10623 HPATH = "instance-modify"
10624 HTYPE = constants.HTYPE_INSTANCE
10627 def CheckArguments(self):
10628 if not (self.op.nics or self.op.disks or self.op.disk_template or
10629 self.op.hvparams or self.op.beparams or self.op.os_name):
10630 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
10632 if self.op.hvparams:
10633 _CheckGlobalHvParams(self.op.hvparams)
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
10638 utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
10639 if disk_op == constants.DDM_REMOVE:
10640 disk_addremove += 1
10642 elif disk_op == constants.DDM_ADD:
10643 disk_addremove += 1
10645 if not isinstance(disk_op, int):
10646 raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
10647 if not isinstance(disk_dict, dict):
10648 msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
10649 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
10651 if disk_op == constants.DDM_ADD:
10652 mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
10653 if mode not in constants.DISK_ACCESS_SET:
10654 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
10655 errors.ECODE_INVAL)
        size = disk_dict.get(constants.IDISK_SIZE, None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing",
                                     errors.ECODE_INVAL)
        try:
          size = int(size)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err), errors.ECODE_INVAL)
        disk_dict[constants.IDISK_SIZE] = size
      else:
        # modification of disk
        if constants.IDISK_SIZE in disk_dict:
10669 raise errors.OpPrereqError("Disk size change not possible, use"
10670 " grow-disk", errors.ECODE_INVAL)
10672 if disk_addremove > 1:
10673 raise errors.OpPrereqError("Only one disk add or remove operation"
10674 " supported at a time", errors.ECODE_INVAL)
10676 if self.op.disks and self.op.disk_template is not None:
10677 raise errors.OpPrereqError("Disk template conversion and other disk"
10678 " changes not supported at the same time",
10679 errors.ECODE_INVAL)
10681 if (self.op.disk_template and
10682 self.op.disk_template in constants.DTS_INT_MIRROR and
10683 self.op.remote_node is None):
10684 raise errors.OpPrereqError("Changing the disk template to a mirrored"
10685 " one requires specifying a secondary node",
10686 errors.ECODE_INVAL)
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
10691 utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
10699 raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
10700 if not isinstance(nic_dict, dict):
10701 msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
10702 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
10704 # nic_dict should be a dict
10705 nic_ip = nic_dict.get(constants.INIC_IP, None)
10706 if nic_ip is not None:
10707 if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict[constants.INIC_IP] = None
        else:
          if not netutils.IPAddress.IsValid(nic_ip):
10711 raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
10712 errors.ECODE_INVAL)
10714 nic_bridge = nic_dict.get("bridge", None)
10715 nic_link = nic_dict.get(constants.INIC_LINK, None)
10716 if nic_bridge and nic_link:
10717 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
10718 " at the same time", errors.ECODE_INVAL)
10719 elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
10720 nic_dict["bridge"] = None
10721 elif nic_link and nic_link.lower() == constants.VALUE_NONE:
10722 nic_dict[constants.INIC_LINK] = None
10724 if nic_op == constants.DDM_ADD:
10725 nic_mac = nic_dict.get(constants.INIC_MAC, None)
10726 if nic_mac is None:
10727 nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
10729 if constants.INIC_MAC in nic_dict:
10730 nic_mac = nic_dict[constants.INIC_MAC]
10731 if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
10732 nic_mac = utils.NormalizeAndValidateMac(nic_mac)
10734 if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
10735 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
10736 " modifying an existing nic",
10737 errors.ECODE_INVAL)
10739 if nic_addremove > 1:
10740 raise errors.OpPrereqError("Only one NIC add or remove operation"
10741 " supported at a time", errors.ECODE_INVAL)
10743 def ExpandNames(self):
10744 self._ExpandAndLockInstance()
10745 self.needed_locks[locking.LEVEL_NODE] = []
10746 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10748 def DeclareLocks(self, level):
10749 if level == locking.LEVEL_NODE:
10750 self._LockInstancesNodes()
10751 if self.op.disk_template and self.op.remote_node:
10752 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10753 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
10755 def BuildHooksEnv(self):
10756 """Build hooks env.
10758 This runs on the master, primary and secondaries.
10762 if constants.BE_MEMORY in self.be_new:
10763 args["memory"] = self.be_new[constants.BE_MEMORY]
10764 if constants.BE_VCPUS in self.be_new:
10765 args["vcpus"] = self.be_new[constants.BE_VCPUS]
10766 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
10767 # information at all.
10770 nic_override = dict(self.op.nics)
10771 for idx, nic in enumerate(self.instance.nics):
10772 if idx in nic_override:
10773 this_nic_override = nic_override[idx]
10775 this_nic_override = {}
10776 if constants.INIC_IP in this_nic_override:
10777 ip = this_nic_override[constants.INIC_IP]
10780 if constants.INIC_MAC in this_nic_override:
10781 mac = this_nic_override[constants.INIC_MAC]
10784 if idx in self.nic_pnew:
10785 nicparams = self.nic_pnew[idx]
10787 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
10788 mode = nicparams[constants.NIC_MODE]
10789 link = nicparams[constants.NIC_LINK]
10790 args["nics"].append((ip, mac, mode, link))
10791 if constants.DDM_ADD in nic_override:
10792 ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
10793 mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
10794 nicparams = self.nic_pnew[constants.DDM_ADD]
10795 mode = nicparams[constants.NIC_MODE]
10796 link = nicparams[constants.NIC_LINK]
10797 args["nics"].append((ip, mac, mode, link))
10798 elif constants.DDM_REMOVE in nic_override:
10799 del args["nics"][-1]
10801 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
10802 if self.op.disk_template:
10803 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
10807 def BuildHooksNodes(self):
10808 """Build hooks nodes.
10811 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
10814 def CheckPrereq(self):
10815 """Check prerequisites.
10817 This only checks the instance list against the existing names.
10820 # checking the new params on the primary/secondary nodes
10822 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10823 cluster = self.cluster = self.cfg.GetClusterInfo()
10824 assert self.instance is not None, \
10825 "Cannot retrieve locked instance %s" % self.op.instance_name
10826 pnode = instance.primary_node
10827 nodelist = list(instance.all_nodes)
10830 if self.op.os_name and not self.op.force:
10831 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
10832 self.op.force_variant)
10833 instance_os = self.op.os_name
10835 instance_os = instance.os
10837 if self.op.disk_template:
10838 if instance.disk_template == self.op.disk_template:
10839 raise errors.OpPrereqError("Instance already has disk template %s" %
10840 instance.disk_template, errors.ECODE_INVAL)
10842 if (instance.disk_template,
10843 self.op.disk_template) not in self._DISK_CONVERSIONS:
10844 raise errors.OpPrereqError("Unsupported disk template conversion from"
10845 " %s to %s" % (instance.disk_template,
10846 self.op.disk_template),
10847 errors.ECODE_INVAL)
10848 _CheckInstanceDown(self, instance, "cannot change disk template")
10849 if self.op.disk_template in constants.DTS_INT_MIRROR:
10850 if self.op.remote_node == pnode:
10851 raise errors.OpPrereqError("Given new secondary node %s is the same"
10852 " as the primary node of the instance" %
10853 self.op.remote_node, errors.ECODE_STATE)
10854 _CheckNodeOnline(self, self.op.remote_node)
10855 _CheckNodeNotDrained(self, self.op.remote_node)
10856 # FIXME: here we assume that the old instance type is DT_PLAIN
10857 assert instance.disk_template == constants.DT_PLAIN
10858 disks = [{constants.IDISK_SIZE: d.size,
10859 constants.IDISK_VG: d.logical_id[0]}
10860 for d in instance.disks]
10861 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
10862 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
10864 # hvparams processing
10865 if self.op.hvparams:
10866 hv_type = instance.hypervisor
10867 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
10868 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
10869 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
10872 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
10873 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
10874 self.hv_proposed = self.hv_new = hv_new # the new actual values
10875 self.hv_inst = i_hvdict # the new dict (without defaults)
10877 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
10879 self.hv_new = self.hv_inst = {}
10881 # beparams processing
10882 if self.op.beparams:
10883 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
10885 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
10886 be_new = cluster.SimpleFillBE(i_bedict)
10887 self.be_proposed = self.be_new = be_new # the new actual values
10888 self.be_inst = i_bedict # the new dict (without defaults)
10890 self.be_new = self.be_inst = {}
10891 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
10892 be_old = cluster.FillBE(instance)
10894 # CPU param validation -- checking every time a paramtere is
10895 # changed to cover all cases where either CPU mask or vcpus have
10897 if (constants.BE_VCPUS in self.be_proposed and
10898 constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
10901 # Verify mask is consistent with number of vCPUs. Can skip this
10902 # test if only 1 entry in the CPU mask, which means same mask
10903 # is applied to all vCPUs.
10904 if (len(cpu_list) > 1 and
10905 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
10906 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
10908 (self.be_proposed[constants.BE_VCPUS],
10909 self.hv_proposed[constants.HV_CPU_MASK]),
10910 errors.ECODE_INVAL)
10912 # Only perform this test if a new CPU mask is given
10913 if constants.HV_CPU_MASK in self.hv_new:
10914 # Calculate the largest CPU number requested
10915 max_requested_cpu = max(map(max, cpu_list))
10916 # Check that all of the instance's nodes have enough physical CPUs to
10917 # satisfy the requested CPU mask
10918 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
10919 max_requested_cpu + 1, instance.hypervisor)
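
    # Example (sketch): with BE_VCPUS == 2, a mask such as "0-1:3" parses into
    # [[0, 1], [3]], so len(cpu_list) == 2 matches the vCPU count and the
    # highest requested CPU (3) means every node needs at least 4 physical
    # CPUs.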
10921 # osparams processing
10922 if self.op.osparams:
10923 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
10924 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
10925 self.os_inst = i_osdict # the new dict (without defaults)
10931 if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
10932 be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
10933 mem_check_list = [pnode]
10934 if be_new[constants.BE_AUTO_BALANCE]:
10935 # either we changed auto_balance to yes or it was from before
10936 mem_check_list.extend(instance.secondary_nodes)
10937 instance_info = self.rpc.call_instance_info(pnode, instance.name,
10938 instance.hypervisor)
10939 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
10940 instance.hypervisor)
10941 pninfo = nodeinfo[pnode]
10942 msg = pninfo.fail_msg
10944 # Assume the primary node is unreachable and go ahead
10945 self.warn.append("Can't get info from primary node %s: %s" %
10947 elif not isinstance(pninfo.payload.get("memory_free", None), int):
10948 self.warn.append("Node data from primary node %s doesn't contain"
10949 " free memory information" % pnode)
10950 elif instance_info.fail_msg:
10951 self.warn.append("Can't get instance runtime information: %s" %
10952 instance_info.fail_msg)
10954 if instance_info.payload:
10955 current_mem = int(instance_info.payload["memory"])
10957 # Assume instance not running
10958 # (there is a slight race condition here, but it's not very probable,
10959 # and we have no other way to check)
10961 miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
10962 pninfo.payload["memory_free"])
10964 raise errors.OpPrereqError("This change will prevent the instance"
10965 " from starting, due to %d MB of memory"
10966 " missing on its primary node" % miss_mem,
10967 errors.ECODE_NORES)
10969 if be_new[constants.BE_AUTO_BALANCE]:
10970 for node, nres in nodeinfo.items():
10971 if node not in instance.secondary_nodes:
10973 nres.Raise("Can't get info from secondary node %s" % node,
10974 prereq=True, ecode=errors.ECODE_STATE)
10975 if not isinstance(nres.payload.get("memory_free", None), int):
10976 raise errors.OpPrereqError("Secondary node %s didn't return free"
10977 " memory information" % node,
10978 errors.ECODE_STATE)
10979 elif be_new[constants.BE_MEMORY] > nres.payload["memory_free"]:
10980 raise errors.OpPrereqError("This change will prevent the instance"
10981 " from failover to its secondary node"
10982 " %s, due to not enough memory" % node,
10983 errors.ECODE_STATE)
10987 self.nic_pinst = {}
10988 for nic_op, nic_dict in self.op.nics:
10989 if nic_op == constants.DDM_REMOVE:
10990 if not instance.nics:
10991 raise errors.OpPrereqError("Instance has no NICs, cannot remove",
10992 errors.ECODE_INVAL)
10994 if nic_op != constants.DDM_ADD:
10996 if not instance.nics:
10997 raise errors.OpPrereqError("Invalid NIC index %s, instance has"
10998 " no NICs" % nic_op,
10999 errors.ECODE_INVAL)
11000 if nic_op < 0 or nic_op >= len(instance.nics):
11001 raise errors.OpPrereqError("Invalid NIC index %s, valid values"
11003 (nic_op, len(instance.nics) - 1),
11004 errors.ECODE_INVAL)
11005 old_nic_params = instance.nics[nic_op].nicparams
11006 old_nic_ip = instance.nics[nic_op].ip
11008 old_nic_params = {}
11011 update_params_dict = dict([(key, nic_dict[key])
11012 for key in constants.NICS_PARAMETERS
11013 if key in nic_dict])
11015 if "bridge" in nic_dict:
11016 update_params_dict[constants.NIC_LINK] = nic_dict["bridge"]
11018 new_nic_params = _GetUpdatedParams(old_nic_params,
11019 update_params_dict)
11020 utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
11021 new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
11022 objects.NIC.CheckParameterSyntax(new_filled_nic_params)
11023 self.nic_pinst[nic_op] = new_nic_params
11024 self.nic_pnew[nic_op] = new_filled_nic_params
11025 new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
11027 if new_nic_mode == constants.NIC_MODE_BRIDGED:
11028 nic_bridge = new_filled_nic_params[constants.NIC_LINK]
          msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
          if msg:
            msg = "Error checking bridges on node %s: %s" % (pnode, msg)
            if self.op.force:
              self.warn.append(msg)
            else:
              raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
11036 if new_nic_mode == constants.NIC_MODE_ROUTED:
11037 if constants.INIC_IP in nic_dict:
            nic_ip = nic_dict[constants.INIC_IP]
          else:
            nic_ip = old_nic_ip
          if nic_ip is None:
            raise errors.OpPrereqError("Cannot set the nic ip to None"
11043 " on a routed nic", errors.ECODE_INVAL)
11044 if constants.INIC_MAC in nic_dict:
11045 nic_mac = nic_dict[constants.INIC_MAC]
11046 if nic_mac is None:
11047 raise errors.OpPrereqError("Cannot set the nic mac to None",
11048 errors.ECODE_INVAL)
11049 elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
11050 # otherwise generate the mac
11051 nic_dict[constants.INIC_MAC] = \
11052 self.cfg.GenerateMAC(self.proc.GetECId())
11054 # or validate/reserve the current one
11056 self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
11057 except errors.ReservationError:
11058 raise errors.OpPrereqError("MAC address %s already in use"
11059 " in cluster" % nic_mac,
11060 errors.ECODE_NOTUNIQUE)
11063 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
11064 raise errors.OpPrereqError("Disk operations not supported for"
11065 " diskless instances",
11066 errors.ECODE_INVAL)
11067 for disk_op, _ in self.op.disks:
11068 if disk_op == constants.DDM_REMOVE:
11069 if len(instance.disks) == 1:
11070 raise errors.OpPrereqError("Cannot remove the last disk of"
11071 " an instance", errors.ECODE_INVAL)
11072 _CheckInstanceDown(self, instance, "cannot remove disks")
11074 if (disk_op == constants.DDM_ADD and
11075 len(instance.disks) >= constants.MAX_DISKS):
11076 raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
11077 " add more" % constants.MAX_DISKS,
11078 errors.ECODE_STATE)
11079 if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
11081 if disk_op < 0 or disk_op >= len(instance.disks):
11082 raise errors.OpPrereqError("Invalid disk index %s, valid values"
11084 (disk_op, len(instance.disks)),
11085 errors.ECODE_INVAL)
11089 def _ConvertPlainToDrbd(self, feedback_fn):
11090 """Converts an instance from plain to drbd.
11093 feedback_fn("Converting template to drbd")
11094 instance = self.instance
11095 pnode = instance.primary_node
11096 snode = self.op.remote_node
11098 # create a fake disk info for _GenerateDiskTemplate
11099 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
11100 constants.IDISK_VG: d.logical_id[0]}
11101 for d in instance.disks]
11102 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
11103 instance.name, pnode, [snode],
11104 disk_info, None, None, 0, feedback_fn)
11105 info = _GetInstanceInfoText(instance)
11106 feedback_fn("Creating aditional volumes...")
11107 # first, create the missing data and meta devices
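# on the primary node only the metadata device (disk.children[1]) needs
# to be created: the data LV already exists as the current plain disk and
# is renamed below; the secondary gets both children created from scratch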
11108 for disk in new_disks:
11109 # unfortunately this is... not too nice
11110 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
11112 for child in disk.children:
11113 _CreateSingleBlockDev(self, snode, instance, child, info, True)
11114 # at this stage, all new LVs have been created; we can rename the
11116 feedback_fn("Renaming original volumes...")
11117 rename_list = [(o, n.children[0].logical_id)
11118 for (o, n) in zip(instance.disks, new_disks)]
11119 result = self.rpc.call_blockdev_rename(pnode, rename_list)
11120 result.Raise("Failed to rename original LVs")
11122 feedback_fn("Initializing DRBD devices...")
11123 # all child devices are in place; we can now create the DRBD devices
11124 for disk in new_disks:
11125 for node in [pnode, snode]:
11126 f_create = node == pnode
11127 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
11129 # at this point, the instance has been modified
11130 instance.disk_template = constants.DT_DRBD8
11131 instance.disks = new_disks
11132 self.cfg.Update(instance, feedback_fn)
11134 # disks are created, waiting for sync
11135 disk_abort = not _WaitForSync(self, instance,
11136 oneshot=not self.op.wait_for_sync)
11138 raise errors.OpExecError("There are some degraded disks for"
11139 " this instance, please cleanup manually")
11141 def _ConvertDrbdToPlain(self, feedback_fn):
11142 """Converts an instance from drbd to plain.
11145 instance = self.instance
11146 assert len(instance.secondary_nodes) == 1
11147 pnode = instance.primary_node
11148 snode = instance.secondary_nodes[0]
11149 feedback_fn("Converting template to plain")
11151 old_disks = instance.disks
11152 new_disks = [d.children[0] for d in old_disks]
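# children[0] of a DRBD8 disk is the underlying data LV, which becomes
# the plain disk; children[1] (the metadata LV) is removed further down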
11154 # copy over size and mode
11155 for parent, child in zip(old_disks, new_disks):
11156 child.size = parent.size
11157 child.mode = parent.mode
11159 # update instance structure
11160 instance.disks = new_disks
11161 instance.disk_template = constants.DT_PLAIN
11162 self.cfg.Update(instance, feedback_fn)
11164 feedback_fn("Removing volumes on the secondary node...")
11165 for disk in old_disks:
11166 self.cfg.SetDiskID(disk, snode)
11167 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
11169 self.LogWarning("Could not remove block device %s on node %s,"
11170 " continuing anyway: %s", disk.iv_name, snode, msg)
11172 feedback_fn("Removing unneeded volumes on the primary node...")
11173 for idx, disk in enumerate(old_disks):
11174 meta = disk.children[1]
11175 self.cfg.SetDiskID(meta, pnode)
11176 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
11178 self.LogWarning("Could not remove metadata for disk %d on node %s,"
11179 " continuing anyway: %s", idx, pnode, msg)
11181 def Exec(self, feedback_fn):
11182 """Modifies an instance.
11184 All parameters take effect only at the next restart of the instance.
11187 # Process here the warnings from CheckPrereq, as we don't have a
11188 # feedback_fn there.
11189 for warn in self.warn:
11190 feedback_fn("WARNING: %s" % warn)
11193 instance = self.instance
11195 for disk_op, disk_dict in self.op.disks:
11196 if disk_op == constants.DDM_REMOVE:
11197 # remove the last disk
11198 device = instance.disks.pop()
11199 device_idx = len(instance.disks)
11200 for node, disk in device.ComputeNodeTree(instance.primary_node):
11201 self.cfg.SetDiskID(disk, node)
11202 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
11204 self.LogWarning("Could not remove disk/%d on node %s: %s,"
11205 " continuing anyway", device_idx, node, msg)
11206 result.append(("disk/%d" % device_idx, "remove"))
11207 elif disk_op == constants.DDM_ADD:
11209 if instance.disk_template in (constants.DT_FILE,
11210 constants.DT_SHARED_FILE):
11211 file_driver, file_path = instance.disks[0].logical_id
11212 file_path = os.path.dirname(file_path)
11214 file_driver = file_path = None
11215 disk_idx_base = len(instance.disks)
11216 new_disk = _GenerateDiskTemplate(self,
11217 instance.disk_template,
11218 instance.name, instance.primary_node,
11219 instance.secondary_nodes,
11223 disk_idx_base, feedback_fn)[0]
11224 instance.disks.append(new_disk)
11225 info = _GetInstanceInfoText(instance)
11227 logging.info("Creating volume %s for instance %s",
11228 new_disk.iv_name, instance.name)
11229 # Note: this needs to be kept in sync with _CreateDisks
11231 for node in instance.all_nodes:
11232 f_create = node == instance.primary_node
11234 _CreateBlockDev(self, node, instance, new_disk,
11235 f_create, info, f_create)
11236 except errors.OpExecError, err:
11237 self.LogWarning("Failed to create volume %s (%s) on"
11239 new_disk.iv_name, new_disk, node, err)
11240 result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
11241 (new_disk.size, new_disk.mode)))
11243 # change a given disk
11244 instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
11245 result.append(("disk.mode/%d" % disk_op,
11246 disk_dict[constants.IDISK_MODE]))
11248 if self.op.disk_template:
11249 r_shut = _ShutdownInstanceDisks(self, instance)
11251 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
11252 " proceed with disk template conversion")
11253 mode = (instance.disk_template, self.op.disk_template)
11255 self._DISK_CONVERSIONS[mode](self, feedback_fn)
11257 self.cfg.ReleaseDRBDMinors(instance.name)
11259 result.append(("disk_template", self.op.disk_template))
11262 for nic_op, nic_dict in self.op.nics:
11263 if nic_op == constants.DDM_REMOVE:
11264 # remove the last nic
11265 del instance.nics[-1]
11266 result.append(("nic.%d" % len(instance.nics), "remove"))
11267 elif nic_op == constants.DDM_ADD:
11268 # mac and bridge should be set by now
11269 mac = nic_dict[constants.INIC_MAC]
11270 ip = nic_dict.get(constants.INIC_IP, None)
11271 nicparams = self.nic_pinst[constants.DDM_ADD]
11272 new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
11273 instance.nics.append(new_nic)
11274 result.append(("nic.%d" % (len(instance.nics) - 1),
11275 "add:mac=%s,ip=%s,mode=%s,link=%s" %
11276 (new_nic.mac, new_nic.ip,
11277 self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
11278 self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
11281 for key in (constants.INIC_MAC, constants.INIC_IP):
11282 if key in nic_dict:
11283 setattr(instance.nics[nic_op], key, nic_dict[key])
11284 if nic_op in self.nic_pinst:
11285 instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
11286 for key, val in nic_dict.iteritems():
11287 result.append(("nic.%s/%d" % (key, nic_op), val))
11290 if self.op.hvparams:
11291 instance.hvparams = self.hv_inst
11292 for key, val in self.op.hvparams.iteritems():
11293 result.append(("hv/%s" % key, val))
11296 if self.op.beparams:
11297 instance.beparams = self.be_inst
11298 for key, val in self.op.beparams.iteritems():
11299 result.append(("be/%s" % key, val))
11302 if self.op.os_name:
11303 instance.os = self.op.os_name
11306 if self.op.osparams:
11307 instance.osparams = self.os_inst
11308 for key, val in self.op.osparams.iteritems():
11309 result.append(("os/%s" % key, val))
11311 self.cfg.Update(instance, feedback_fn)
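# dispatch table mapping (current template, requested template) pairs to
# the conversion helpers used by Exec above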
11315 _DISK_CONVERSIONS = {
11316 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
11317 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
11321 class LUInstanceChangeGroup(LogicalUnit):
11322 HPATH = "instance-change-group"
11323 HTYPE = constants.HTYPE_INSTANCE
11326 def ExpandNames(self):
11327 self.share_locks = _ShareAll()
11328 self.needed_locks = {
11329 locking.LEVEL_NODEGROUP: [],
11330 locking.LEVEL_NODE: [],
11333 self._ExpandAndLockInstance()
11335 if self.op.target_groups:
11336 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
11337 self.op.target_groups)
11339 self.req_target_uuids = None
11341 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
11343 def DeclareLocks(self, level):
11344 if level == locking.LEVEL_NODEGROUP:
11345 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
11347 if self.req_target_uuids:
11348 lock_groups = set(self.req_target_uuids)
11350 # Lock all groups used by the instance optimistically; this requires going
11351 # via the node before it's locked, requiring verification later on
11352 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
11353 lock_groups.update(instance_groups)
11355 # No target groups, need to lock all of them
11356 lock_groups = locking.ALL_SET
11358 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
11360 elif level == locking.LEVEL_NODE:
11361 if self.req_target_uuids:
11362 # Lock all nodes used by instances
11363 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
11364 self._LockInstancesNodes()
11366 # Lock all nodes in all potential target groups
11367 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
11368 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
11369 member_nodes = [node_name
11370 for group in lock_groups
11371 for node_name in self.cfg.GetNodeGroup(group).members]
11372 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
11374 # Lock all nodes as all groups are potential targets
11375 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11377 def CheckPrereq(self):
11378 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11379 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11380 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11382 assert (self.req_target_uuids is None or
11383 owned_groups.issuperset(self.req_target_uuids))
11384 assert owned_instances == set([self.op.instance_name])
11386 # Get instance information
11387 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11389 # Check if node groups for locked instance are still correct
11390 assert owned_nodes.issuperset(self.instance.all_nodes), \
11391 ("Instance %s's nodes changed while we kept the lock" %
11392 self.op.instance_name)
11394 inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
11397 if self.req_target_uuids:
11398 # User requested specific target groups
11399 self.target_uuids = self.req_target_uuids
11401 # All groups except those used by the instance are potential targets
11402 self.target_uuids = owned_groups - inst_groups
11404 conflicting_groups = self.target_uuids & inst_groups
11405 if conflicting_groups:
11406 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
11407 " used by the instance '%s'" %
11408 (utils.CommaJoin(conflicting_groups),
11409 self.op.instance_name),
11410 errors.ECODE_INVAL)
11412 if not self.target_uuids:
11413 raise errors.OpPrereqError("There are no possible target groups",
11414 errors.ECODE_INVAL)
11416 def BuildHooksEnv(self):
11417 """Build hooks env.
11420 assert self.target_uuids
11423 "TARGET_GROUPS": " ".join(self.target_uuids),
11426 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11430 def BuildHooksNodes(self):
11431 """Build hooks nodes.
11434 mn = self.cfg.GetMasterNode()
11435 return ([mn], [mn])
11437 def Exec(self, feedback_fn):
11438 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
11440 assert instances == [self.op.instance_name], "Instance not locked"
11442 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
11443 instances=instances, target_groups=list(self.target_uuids))
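# in "change group" mode the iallocator computes the jobs needed to move
# the instance onto nodes of one of the target groups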
11445 ial.Run(self.op.iallocator)
11447 if not ial.success:
11448 raise errors.OpPrereqError("Can't compute solution for changing group of"
11449 " instance '%s' using iallocator '%s': %s" %
11450 (self.op.instance_name, self.op.iallocator,
11452 errors.ECODE_NORES)
11454 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
11456 self.LogInfo("Iallocator returned %s job(s) for changing group of"
11457 " instance '%s'", len(jobs), self.op.instance_name)
11459 return ResultWithJobs(jobs)
11462 class LUBackupQuery(NoHooksLU):
11463 """Query the exports list
11468 def ExpandNames(self):
11469 self.needed_locks = {}
11470 self.share_locks[locking.LEVEL_NODE] = 1
11471 if not self.op.nodes:
11472 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11474 self.needed_locks[locking.LEVEL_NODE] = \
11475 _GetWantedNodes(self, self.op.nodes)
11477 def Exec(self, feedback_fn):
11478 """Compute the list of all the exported system images.
11481 @return: a dictionary with the structure node->(export-list)
11482 where export-list is a list of the instances exported on
11486 self.nodes = self.owned_locks(locking.LEVEL_NODE)
11487 rpcresult = self.rpc.call_export_list(self.nodes)
11489 for node in rpcresult:
11490 if rpcresult[node].fail_msg:
11491 result[node] = False
11493 result[node] = rpcresult[node].payload
11498 class LUBackupPrepare(NoHooksLU):
11499 """Prepares an instance for an export and returns useful information.
11504 def ExpandNames(self):
11505 self._ExpandAndLockInstance()
11507 def CheckPrereq(self):
11508 """Check prerequisites.
11511 instance_name = self.op.instance_name
11513 self.instance = self.cfg.GetInstanceInfo(instance_name)
11514 assert self.instance is not None, \
11515 "Cannot retrieve locked instance %s" % self.op.instance_name
11516 _CheckNodeOnline(self, self.instance.primary_node)
11518 self._cds = _GetClusterDomainSecret()
11520 def Exec(self, feedback_fn):
11521 """Prepares an instance for an export.
11524 instance = self.instance
11526 if self.op.mode == constants.EXPORT_MODE_REMOTE:
11527 salt = utils.GenerateSecret(8)
11529 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
11530 result = self.rpc.call_x509_cert_create(instance.primary_node,
11531 constants.RIE_CERT_VALIDITY)
11532 result.Raise("Can't create X509 key and certificate on %s" % result.node)
11534 (name, cert_pem) = result.payload
11536 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
11540 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
11541 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
11543 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
11549 class LUBackupExport(LogicalUnit):
11550 """Export an instance to an image in the cluster.
11553 HPATH = "instance-export"
11554 HTYPE = constants.HTYPE_INSTANCE
11557 def CheckArguments(self):
11558 """Check the arguments.
11561 self.x509_key_name = self.op.x509_key_name
11562 self.dest_x509_ca_pem = self.op.destination_x509_ca
11564 if self.op.mode == constants.EXPORT_MODE_REMOTE:
11565 if not self.x509_key_name:
11566 raise errors.OpPrereqError("Missing X509 key name for encryption",
11567 errors.ECODE_INVAL)
11569 if not self.dest_x509_ca_pem:
11570 raise errors.OpPrereqError("Missing destination X509 CA",
11571 errors.ECODE_INVAL)
11573 def ExpandNames(self):
11574 self._ExpandAndLockInstance()
11576 # Lock all nodes for local exports
11577 if self.op.mode == constants.EXPORT_MODE_LOCAL:
11578 # FIXME: lock only instance primary and destination node
11580 # Sad but true, for now we have to lock all nodes, as we don't know where
11581 # the previous export might be, and in this LU we search for it and
11582 # remove it from its current node. In the future we could fix this by:
11583 # - making a tasklet to search (share-lock all), then create the
11584 # new one, then one to remove, after
11585 # - removing the removal operation altogether
11586 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11588 def DeclareLocks(self, level):
11589 """Last minute lock declaration."""
11590 # All nodes are locked anyway, so nothing to do here.
11592 def BuildHooksEnv(self):
11593 """Build hooks env.
11595 This will run on the master, primary node and target node.
11599 "EXPORT_MODE": self.op.mode,
11600 "EXPORT_NODE": self.op.target_node,
11601 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
11602 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
11603 # TODO: Generic function for boolean env variables
11604 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
11607 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11611 def BuildHooksNodes(self):
11612 """Build hooks nodes.
11615 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
11617 if self.op.mode == constants.EXPORT_MODE_LOCAL:
11618 nl.append(self.op.target_node)
11622 def CheckPrereq(self):
11623 """Check prerequisites.
11625 This checks that the instance and node names are valid.
11628 instance_name = self.op.instance_name
11630 self.instance = self.cfg.GetInstanceInfo(instance_name)
11631 assert self.instance is not None, \
11632 "Cannot retrieve locked instance %s" % self.op.instance_name
11633 _CheckNodeOnline(self, self.instance.primary_node)
11635 if (self.op.remove_instance and self.instance.admin_up and
11636 not self.op.shutdown):
11637 raise errors.OpPrereqError("Can not remove instance without shutting it"
11640 if self.op.mode == constants.EXPORT_MODE_LOCAL:
11641 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
11642 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
11643 assert self.dst_node is not None
11645 _CheckNodeOnline(self, self.dst_node.name)
11646 _CheckNodeNotDrained(self, self.dst_node.name)
11649 self.dest_disk_info = None
11650 self.dest_x509_ca = None
11652 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
11653 self.dst_node = None
11655 if len(self.op.target_node) != len(self.instance.disks):
11656 raise errors.OpPrereqError(("Received destination information for %s"
11657 " disks, but instance %s has %s disks") %
11658 (len(self.op.target_node), instance_name,
11659 len(self.instance.disks)),
11660 errors.ECODE_INVAL)
11662 cds = _GetClusterDomainSecret()
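# the cluster domain secret is used to verify (via HMAC) that the X509
# key name and the signature on the destination CA originate from a
# cluster sharing the same secret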
11664 # Check X509 key name
11666 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
11667 except (TypeError, ValueError), err:
11668 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
11670 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
11671 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
11672 errors.ECODE_INVAL)
11674 # Load and verify CA
11676 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
11677 except OpenSSL.crypto.Error, err:
11678 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
11679 (err, ), errors.ECODE_INVAL)
11681 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
11682 if errcode is not None:
11683 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
11684 (msg, ), errors.ECODE_INVAL)
11686 self.dest_x509_ca = cert
11688 # Verify target information
11690 for idx, disk_data in enumerate(self.op.target_node):
11692 (host, port, magic) = \
11693 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
11694 except errors.GenericError, err:
11695 raise errors.OpPrereqError("Target info for disk %s: %s" %
11696 (idx, err), errors.ECODE_INVAL)
11698 disk_info.append((host, port, magic))
11700 assert len(disk_info) == len(self.op.target_node)
11701 self.dest_disk_info = disk_info
11704 raise errors.ProgrammerError("Unhandled export mode %r" %
11707 # instance disk type verification
11708 # TODO: Implement export support for file-based disks
11709 for disk in self.instance.disks:
11710 if disk.dev_type == constants.LD_FILE:
11711 raise errors.OpPrereqError("Export not supported for instances with"
11712 " file-based disks", errors.ECODE_INVAL)
11714 def _CleanupExports(self, feedback_fn):
11715 """Removes exports of current instance from all other nodes.
11717 If an instance in a cluster with nodes A..D was exported to node C, its
11718 exports will be removed from the nodes A, B and D.
11721 assert self.op.mode != constants.EXPORT_MODE_REMOTE
11723 nodelist = self.cfg.GetNodeList()
11724 nodelist.remove(self.dst_node.name)
11726 # on one-node clusters nodelist will be empty after the removal
11727 # if we proceeded, the backup would be removed because OpBackupQuery
11728 # substitutes an empty list with the full cluster node list.
11729 iname = self.instance.name
11731 feedback_fn("Removing old exports for instance %s" % iname)
11732 exportlist = self.rpc.call_export_list(nodelist)
11733 for node in exportlist:
11734 if exportlist[node].fail_msg:
11736 if iname in exportlist[node].payload:
11737 msg = self.rpc.call_export_remove(node, iname).fail_msg
11739 self.LogWarning("Could not remove older export for instance %s"
11740 " on node %s: %s", iname, node, msg)
11742 def Exec(self, feedback_fn):
11743 """Export an instance to an image in the cluster.
11746 assert self.op.mode in constants.EXPORT_MODES
11748 instance = self.instance
11749 src_node = instance.primary_node
11751 if self.op.shutdown:
11752 # shutdown the instance, but not the disks
11753 feedback_fn("Shutting down instance %s" % instance.name)
11754 result = self.rpc.call_instance_shutdown(src_node, instance,
11755 self.op.shutdown_timeout)
11756 # TODO: Maybe ignore failures if ignore_remove_failures is set
11757 result.Raise("Could not shutdown instance %s on"
11758 " node %s" % (instance.name, src_node))
11760 # set the disk IDs correctly since call_instance_start needs the
11761 # correct drbd minor to create the symlinks
11762 for disk in instance.disks:
11763 self.cfg.SetDiskID(disk, src_node)
11765 activate_disks = (not instance.admin_up)
11768 # Activate the instance disks if we're exporting a stopped instance
11769 feedback_fn("Activating disks for %s" % instance.name)
11770 _StartInstanceDisks(self, instance, None)
11773 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
11776 helper.CreateSnapshots()
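# the export itself reads from the snapshots created above, which is why
# the instance may be restarted below while the data transfer is still
# outstanding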
11778 if (self.op.shutdown and instance.admin_up and
11779 not self.op.remove_instance):
11780 assert not activate_disks
11781 feedback_fn("Starting instance %s" % instance.name)
11782 result = self.rpc.call_instance_start(src_node, instance,
11784 msg = result.fail_msg
11786 feedback_fn("Failed to start instance: %s" % msg)
11787 _ShutdownInstanceDisks(self, instance)
11788 raise errors.OpExecError("Could not start instance: %s" % msg)
11790 if self.op.mode == constants.EXPORT_MODE_LOCAL:
11791 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
11792 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
11793 connect_timeout = constants.RIE_CONNECT_TIMEOUT
11794 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
11796 (key_name, _, _) = self.x509_key_name
11799 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
11802 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
11803 key_name, dest_ca_pem,
11808 # Check for backwards compatibility
11809 assert len(dresults) == len(instance.disks)
11810 assert compat.all(isinstance(i, bool) for i in dresults), \
11811 "Not all results are boolean: %r" % dresults
11815 feedback_fn("Deactivating disks for %s" % instance.name)
11816 _ShutdownInstanceDisks(self, instance)
11818 if not (compat.all(dresults) and fin_resu):
11821 failures.append("export finalization")
11822 if not compat.all(dresults):
11823 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
11825 failures.append("disk export: disk(s) %s" % fdsk)
11827 raise errors.OpExecError("Export failed, errors in %s" %
11828 utils.CommaJoin(failures))
11830 # At this point, the export was successful, we can cleanup/finish
11832 # Remove instance if requested
11833 if self.op.remove_instance:
11834 feedback_fn("Removing instance %s" % instance.name)
11835 _RemoveInstance(self, feedback_fn, instance,
11836 self.op.ignore_remove_failures)
11838 if self.op.mode == constants.EXPORT_MODE_LOCAL:
11839 self._CleanupExports(feedback_fn)
11841 return fin_resu, dresults
11844 class LUBackupRemove(NoHooksLU):
11845 """Remove exports related to the named instance.
11850 def ExpandNames(self):
11851 self.needed_locks = {}
11852 # We need all nodes to be locked in order for RemoveExport to work, but we
11853 # don't need to lock the instance itself, as nothing will happen to it (and
11854 # we can also remove exports for an already-removed instance)
11855 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11857 def Exec(self, feedback_fn):
11858 """Remove any export.
11861 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
11862 # If the instance was not found we'll try with the name that was passed in.
11863 # This will only work if it was an FQDN, though.
11865 if not instance_name:
11867 instance_name = self.op.instance_name
11869 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
11870 exportlist = self.rpc.call_export_list(locked_nodes)
11872 for node in exportlist:
11873 msg = exportlist[node].fail_msg
11875 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
11877 if instance_name in exportlist[node].payload:
11879 result = self.rpc.call_export_remove(node, instance_name)
11880 msg = result.fail_msg
11882 logging.error("Could not remove export for instance %s"
11883 " on node %s: %s", instance_name, node, msg)
11885 if fqdn_warn and not found:
11886 feedback_fn("Export not found. If trying to remove an export belonging"
11887 " to a deleted instance please use its Fully Qualified"
11891 class LUGroupAdd(LogicalUnit):
11892 """Logical unit for creating node groups.
11895 HPATH = "group-add"
11896 HTYPE = constants.HTYPE_GROUP
11899 def ExpandNames(self):
11900 # We need the new group's UUID here so that we can create and acquire the
11901 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
11902 # that it should not check whether the UUID exists in the configuration.
11903 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
11904 self.needed_locks = {}
11905 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
11907 def CheckPrereq(self):
11908 """Check prerequisites.
11910 This checks that the given group name is not an existing node group
11915 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11916 except errors.OpPrereqError:
11919 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
11920 " node group (UUID: %s)" %
11921 (self.op.group_name, existing_uuid),
11922 errors.ECODE_EXISTS)
11924 if self.op.ndparams:
11925 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
11927 def BuildHooksEnv(self):
11928 """Build hooks env.
11932 "GROUP_NAME": self.op.group_name,
11935 def BuildHooksNodes(self):
11936 """Build hooks nodes.
11939 mn = self.cfg.GetMasterNode()
11940 return ([mn], [mn])
11942 def Exec(self, feedback_fn):
11943 """Add the node group to the cluster.
11946 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
11947 uuid=self.group_uuid,
11948 alloc_policy=self.op.alloc_policy,
11949 ndparams=self.op.ndparams)
11951 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
11952 del self.remove_locks[locking.LEVEL_NODEGROUP]
11955 class LUGroupAssignNodes(NoHooksLU):
11956 """Logical unit for assigning nodes to groups.
11961 def ExpandNames(self):
11962 # These raise errors.OpPrereqError on their own:
11963 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11964 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
11966 # We want to lock all the affected nodes and groups. We have readily
11967 # available the list of nodes, and the *destination* group. To gather the
11968 # list of "source" groups, we need to fetch node information later on.
11969 self.needed_locks = {
11970 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
11971 locking.LEVEL_NODE: self.op.nodes,
11974 def DeclareLocks(self, level):
11975 if level == locking.LEVEL_NODEGROUP:
11976 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
11978 # Try to get all affected nodes' groups without having the group or node
11979 # lock yet. Needs verification later in the code flow.
11980 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
11982 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
11984 def CheckPrereq(self):
11985 """Check prerequisites.
11988 assert self.needed_locks[locking.LEVEL_NODEGROUP]
11989 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
11990 frozenset(self.op.nodes))
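# verify that the node group locks acquired optimistically in
# DeclareLocks still cover all groups the nodes currently belong to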
11992 expected_locks = (set([self.group_uuid]) |
11993 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
11994 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
11995 if actual_locks != expected_locks:
11996 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
11997 " current groups are '%s', used to be '%s'" %
11998 (utils.CommaJoin(expected_locks),
11999 utils.CommaJoin(actual_locks)))
12001 self.node_data = self.cfg.GetAllNodesInfo()
12002 self.group = self.cfg.GetNodeGroup(self.group_uuid)
12003 instance_data = self.cfg.GetAllInstancesInfo()
12005 if self.group is None:
12006 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
12007 (self.op.group_name, self.group_uuid))
12009 (new_splits, previous_splits) = \
12010 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
12011 for node in self.op.nodes],
12012 self.node_data, instance_data)
12015 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
12017 if not self.op.force:
12018 raise errors.OpExecError("The following instances get split by this"
12019 " change and --force was not given: %s" %
12022 self.LogWarning("This operation will split the following instances: %s",
12025 if previous_splits:
12026 self.LogWarning("In addition, these already-split instances continue"
12027 " to be split across groups: %s",
12028 utils.CommaJoin(utils.NiceSort(previous_splits)))
12030 def Exec(self, feedback_fn):
12031 """Assign nodes to a new group.
12034 for node in self.op.nodes:
12035 self.node_data[node].group = self.group_uuid
12037 # FIXME: Depends on side-effects of modifying the result of
12038 # C{cfg.GetAllNodesInfo}
12040 self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
12043 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
12044 """Check for split instances after a node assignment.
12046 This method considers a series of node assignments as an atomic operation,
12047 and returns information about split instances after applying the set of
12050 In particular, it returns information about newly split instances, and about
12051 instances that were already split and remain so after the change.
12053 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
12056 @type changes: list of (node_name, new_group_uuid) pairs.
12057 @param changes: list of node assignments to consider.
12058 @param node_data: a dict with data for all nodes
12059 @param instance_data: a dict with all instances to consider
12060 @rtype: a two-tuple
12061 @return: a list of instances that were previously okay and become split as a
12062 consequence of this change, and a list of instances that were previously
12063 split and that this change does not fix.
12066 changed_nodes = dict((node, group) for node, group in changes
12067 if node_data[node].group != group)
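# only assignments that actually move a node to a different group are
# relevant for the split computation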
12069 all_split_instances = set()
12070 previously_split_instances = set()
12072 def InstanceNodes(instance):
12073 return [instance.primary_node] + list(instance.secondary_nodes)
12075 for inst in instance_data.values():
12076 if inst.disk_template not in constants.DTS_INT_MIRROR:
12079 instance_nodes = InstanceNodes(inst)
12081 if len(set(node_data[node].group for node in instance_nodes)) > 1:
12082 previously_split_instances.add(inst.name)
12084 if len(set(changed_nodes.get(node, node_data[node].group)
12085 for node in instance_nodes)) > 1:
12086 all_split_instances.add(inst.name)
12088 return (list(all_split_instances - previously_split_instances),
12089 list(previously_split_instances & all_split_instances))
12092 class _GroupQuery(_QueryBase):
12093 FIELDS = query.GROUP_FIELDS
12095 def ExpandNames(self, lu):
12096 lu.needed_locks = {}
12098 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
12099 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
12102 self.wanted = [name_to_uuid[name]
12103 for name in utils.NiceSort(name_to_uuid.keys())]
12105 # Entries in self.names may be either group names or UUIDs.
12108 all_uuid = frozenset(self._all_groups.keys())
12110 for name in self.names:
12111 if name in all_uuid:
12112 self.wanted.append(name)
12113 elif name in name_to_uuid:
12114 self.wanted.append(name_to_uuid[name])
12116 missing.append(name)
12119 raise errors.OpPrereqError("Some groups do not exist: %s" %
12120 utils.CommaJoin(missing),
12121 errors.ECODE_NOENT)
12123 def DeclareLocks(self, lu, level):
12126 def _GetQueryData(self, lu):
12127 """Computes the list of node groups and their attributes.
12130 do_nodes = query.GQ_NODE in self.requested_data
12131 do_instances = query.GQ_INST in self.requested_data
12133 group_to_nodes = None
12134 group_to_instances = None
12136 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
12137 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
12138 # latter GetAllInstancesInfo() is not enough, for we have to go through
12139 # instance->node. Hence, we will need to process nodes even if we only need
12140 # instance information.
12141 if do_nodes or do_instances:
12142 all_nodes = lu.cfg.GetAllNodesInfo()
12143 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
12146 for node in all_nodes.values():
12147 if node.group in group_to_nodes:
12148 group_to_nodes[node.group].append(node.name)
12149 node_to_group[node.name] = node.group
12152 all_instances = lu.cfg.GetAllInstancesInfo()
12153 group_to_instances = dict((uuid, []) for uuid in self.wanted)
12155 for instance in all_instances.values():
12156 node = instance.primary_node
12157 if node in node_to_group:
12158 group_to_instances[node_to_group[node]].append(instance.name)
12161 # Do not pass on node information if it was not requested.
12162 group_to_nodes = None
12164 return query.GroupQueryData([self._all_groups[uuid]
12165 for uuid in self.wanted],
12166 group_to_nodes, group_to_instances)
12169 class LUGroupQuery(NoHooksLU):
12170 """Logical unit for querying node groups.
12175 def CheckArguments(self):
12176 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
12177 self.op.output_fields, False)
12179 def ExpandNames(self):
12180 self.gq.ExpandNames(self)
12182 def DeclareLocks(self, level):
12183 self.gq.DeclareLocks(self, level)
12185 def Exec(self, feedback_fn):
12186 return self.gq.OldStyleQuery(self)
12189 class LUGroupSetParams(LogicalUnit):
12190 """Modifies the parameters of a node group.
12193 HPATH = "group-modify"
12194 HTYPE = constants.HTYPE_GROUP
12197 def CheckArguments(self):
12200 self.op.alloc_policy,
12203 if all_changes.count(None) == len(all_changes):
12204 raise errors.OpPrereqError("Please pass at least one modification",
12205 errors.ECODE_INVAL)
12207 def ExpandNames(self):
12208 # This raises errors.OpPrereqError on its own:
12209 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12211 self.needed_locks = {
12212 locking.LEVEL_NODEGROUP: [self.group_uuid],
12215 def CheckPrereq(self):
12216 """Check prerequisites.
12219 self.group = self.cfg.GetNodeGroup(self.group_uuid)
12221 if self.group is None:
12222 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
12223 (self.op.group_name, self.group_uuid))
12225 if self.op.ndparams:
12226 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
12227 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
12228 self.new_ndparams = new_ndparams
12230 def BuildHooksEnv(self):
12231 """Build hooks env.
12235 "GROUP_NAME": self.op.group_name,
12236 "NEW_ALLOC_POLICY": self.op.alloc_policy,
12239 def BuildHooksNodes(self):
12240 """Build hooks nodes.
12243 mn = self.cfg.GetMasterNode()
12244 return ([mn], [mn])
12246 def Exec(self, feedback_fn):
12247 """Modifies the node group.
12252 if self.op.ndparams:
12253 self.group.ndparams = self.new_ndparams
12254 result.append(("ndparams", str(self.group.ndparams)))
12256 if self.op.alloc_policy:
12257 self.group.alloc_policy = self.op.alloc_policy
12259 self.cfg.Update(self.group, feedback_fn)
12263 class LUGroupRemove(LogicalUnit):
12264 HPATH = "group-remove"
12265 HTYPE = constants.HTYPE_GROUP
12268 def ExpandNames(self):
12269 # This raises errors.OpPrereqError on its own:
12270 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12271 self.needed_locks = {
12272 locking.LEVEL_NODEGROUP: [self.group_uuid],
12275 def CheckPrereq(self):
12276 """Check prerequisites.
12278 This checks that the given group name exists as a node group, that it is
12279 empty (i.e., contains no nodes), and that it is not the last group of the
12283 # Verify that the group is empty.
12284 group_nodes = [node.name
12285 for node in self.cfg.GetAllNodesInfo().values()
12286 if node.group == self.group_uuid]
12289 raise errors.OpPrereqError("Group '%s' not empty, has the following"
12291 (self.op.group_name,
12292 utils.CommaJoin(utils.NiceSort(group_nodes))),
12293 errors.ECODE_STATE)
12295 # Verify the cluster would not be left group-less.
12296 if len(self.cfg.GetNodeGroupList()) == 1:
12297 raise errors.OpPrereqError("Group '%s' is the only group,"
12298 " cannot be removed" %
12299 self.op.group_name,
12300 errors.ECODE_STATE)
12302 def BuildHooksEnv(self):
12303 """Build hooks env.
12307 "GROUP_NAME": self.op.group_name,
12310 def BuildHooksNodes(self):
12311 """Build hooks nodes.
12314 mn = self.cfg.GetMasterNode()
12315 return ([mn], [mn])
12317 def Exec(self, feedback_fn):
12318 """Remove the node group.
12322 self.cfg.RemoveNodeGroup(self.group_uuid)
12323 except errors.ConfigurationError:
12324 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
12325 (self.op.group_name, self.group_uuid))
12327 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
12330 class LUGroupRename(LogicalUnit):
12331 HPATH = "group-rename"
12332 HTYPE = constants.HTYPE_GROUP
12335 def ExpandNames(self):
12336 # This raises errors.OpPrereqError on its own:
12337 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12339 self.needed_locks = {
12340 locking.LEVEL_NODEGROUP: [self.group_uuid],
12343 def CheckPrereq(self):
12344 """Check prerequisites.
12346 Ensures the requested new name is not yet used.
12350 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
12351 except errors.OpPrereqError:
12354 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
12355 " node group (UUID: %s)" %
12356 (self.op.new_name, new_name_uuid),
12357 errors.ECODE_EXISTS)
12359 def BuildHooksEnv(self):
12360 """Build hooks env.
12364 "OLD_NAME": self.op.group_name,
12365 "NEW_NAME": self.op.new_name,
12368 def BuildHooksNodes(self):
12369 """Build hooks nodes.
12372 mn = self.cfg.GetMasterNode()
12374 all_nodes = self.cfg.GetAllNodesInfo()
12375 all_nodes.pop(mn, None)
12378 run_nodes.extend(node.name for node in all_nodes.values()
12379 if node.group == self.group_uuid)
12381 return (run_nodes, run_nodes)
12383 def Exec(self, feedback_fn):
12384 """Rename the node group.
12387 group = self.cfg.GetNodeGroup(self.group_uuid)
12390 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
12391 (self.op.group_name, self.group_uuid))
12393 group.name = self.op.new_name
12394 self.cfg.Update(group, feedback_fn)
12396 return self.op.new_name
12399 class LUGroupEvacuate(LogicalUnit):
12400 HPATH = "group-evacuate"
12401 HTYPE = constants.HTYPE_GROUP
12404 def ExpandNames(self):
12405 # This raises errors.OpPrereqError on its own:
12406 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12408 if self.op.target_groups:
12409 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
12410 self.op.target_groups)
12412 self.req_target_uuids = []
12414 if self.group_uuid in self.req_target_uuids:
12415 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
12416 " as a target group (targets are %s)" %
12418 utils.CommaJoin(self.req_target_uuids)),
12419 errors.ECODE_INVAL)
12421 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
12423 self.share_locks = _ShareAll()
12424 self.needed_locks = {
12425 locking.LEVEL_INSTANCE: [],
12426 locking.LEVEL_NODEGROUP: [],
12427 locking.LEVEL_NODE: [],
12430 def DeclareLocks(self, level):
12431 if level == locking.LEVEL_INSTANCE:
12432 assert not self.needed_locks[locking.LEVEL_INSTANCE]
12434 # Lock instances optimistically, needs verification once node and group
12435 # locks have been acquired
12436 self.needed_locks[locking.LEVEL_INSTANCE] = \
12437 self.cfg.GetNodeGroupInstances(self.group_uuid)
12439 elif level == locking.LEVEL_NODEGROUP:
12440 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
12442 if self.req_target_uuids:
12443 lock_groups = set([self.group_uuid] + self.req_target_uuids)
12445 # Lock all groups used by instances optimistically; this requires going
12446 # via the node before it's locked, requiring verification later on
12447 lock_groups.update(group_uuid
12448 for instance_name in
12449 self.owned_locks(locking.LEVEL_INSTANCE)
12451 self.cfg.GetInstanceNodeGroups(instance_name))
12453 # No target groups, need to lock all of them
12454 lock_groups = locking.ALL_SET
12456 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
12458 elif level == locking.LEVEL_NODE:
12459 # This will only lock the nodes in the group to be evacuated which
12460 # contain actual instances
12461 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
12462 self._LockInstancesNodes()
12464 # Lock all nodes in group to be evacuated and target groups
12465 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
12466 assert self.group_uuid in owned_groups
12467 member_nodes = [node_name
12468 for group in owned_groups
12469 for node_name in self.cfg.GetNodeGroup(group).members]
12470 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
12472 def CheckPrereq(self):
12473 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
12474 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
12475 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
12477 assert owned_groups.issuperset(self.req_target_uuids)
12478 assert self.group_uuid in owned_groups
12480 # Check if locked instances are still correct
12481 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
12483 # Get instance information
12484 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
12486 # Check if node groups for locked instances are still correct
12487 for instance_name in owned_instances:
12488 inst = self.instances[instance_name]
12489 assert owned_nodes.issuperset(inst.all_nodes), \
12490 "Instance %s's nodes changed while we kept the lock" % instance_name
12492 inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
12495 assert self.group_uuid in inst_groups, \
12496 "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
12498 if self.req_target_uuids:
12499 # User requested specific target groups
12500 self.target_uuids = self.req_target_uuids
12502 # All groups except the one to be evacuated are potential targets
12503 self.target_uuids = [group_uuid for group_uuid in owned_groups
12504 if group_uuid != self.group_uuid]
12506 if not self.target_uuids:
12507 raise errors.OpPrereqError("There are no possible target groups",
12508 errors.ECODE_INVAL)
12510 def BuildHooksEnv(self):
12511 """Build hooks env.
12515 "GROUP_NAME": self.op.group_name,
12516 "TARGET_GROUPS": " ".join(self.target_uuids),
12519 def BuildHooksNodes(self):
12520 """Build hooks nodes.
12523 mn = self.cfg.GetMasterNode()
12525 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
12527 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
12529 return (run_nodes, run_nodes)
12531 def Exec(self, feedback_fn):
12532 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
12534 assert self.group_uuid not in self.target_uuids
12536 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
12537 instances=instances, target_groups=self.target_uuids)
12539 ial.Run(self.op.iallocator)
12541 if not ial.success:
12542 raise errors.OpPrereqError("Can't compute group evacuation using"
12543 " iallocator '%s': %s" %
12544 (self.op.iallocator, ial.info),
12545 errors.ECODE_NORES)
12547 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
12549 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
12550 len(jobs), self.op.group_name)
12552 return ResultWithJobs(jobs)
12555 class TagsLU(NoHooksLU): # pylint: disable=W0223
12556 """Generic tags LU.
12558 This is an abstract class which is the parent of all the other tags LUs.
12561 def ExpandNames(self):
12562 self.group_uuid = None
12563 self.needed_locks = {}
12564 if self.op.kind == constants.TAG_NODE:
12565 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
12566 self.needed_locks[locking.LEVEL_NODE] = self.op.name
12567 elif self.op.kind == constants.TAG_INSTANCE:
12568 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
12569 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
12570 elif self.op.kind == constants.TAG_NODEGROUP:
12571 self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
12573 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
12574 # not possible to acquire the BGL based on opcode parameters)
12576 def CheckPrereq(self):
12577 """Check prerequisites.
12580 if self.op.kind == constants.TAG_CLUSTER:
12581 self.target = self.cfg.GetClusterInfo()
12582 elif self.op.kind == constants.TAG_NODE:
12583 self.target = self.cfg.GetNodeInfo(self.op.name)
12584 elif self.op.kind == constants.TAG_INSTANCE:
12585 self.target = self.cfg.GetInstanceInfo(self.op.name)
12586 elif self.op.kind == constants.TAG_NODEGROUP:
12587 self.target = self.cfg.GetNodeGroup(self.group_uuid)
12589 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
12590 str(self.op.kind), errors.ECODE_INVAL)
12593 class LUTagsGet(TagsLU):
12594 """Returns the tags of a given object.
12599 def ExpandNames(self):
12600 TagsLU.ExpandNames(self)
12602 # Share locks as this is only a read operation
12603 self.share_locks = _ShareAll()
12605 def Exec(self, feedback_fn):
12606 """Returns the tag list.
12609 return list(self.target.GetTags())
12612 class LUTagsSearch(NoHooksLU):
12613 """Searches the tags for a given pattern.
12618 def ExpandNames(self):
12619 self.needed_locks = {}
12621 def CheckPrereq(self):
12622 """Check prerequisites.
12624 This checks the pattern passed for validity by compiling it.
12628 self.re = re.compile(self.op.pattern)
12629 except re.error, err:
12630 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
12631 (self.op.pattern, err), errors.ECODE_INVAL)
12633 def Exec(self, feedback_fn):
12634 """Returns the tag list.
12638 tgts = [("/cluster", cfg.GetClusterInfo())]
12639 ilist = cfg.GetAllInstancesInfo().values()
12640 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
12641 nlist = cfg.GetAllNodesInfo().values()
12642 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
12643 tgts.extend(("/nodegroup/%s" % n.name, n)
12644 for n in cfg.GetAllNodeGroupsInfo().values())
12646 for path, target in tgts:
12647 for tag in target.GetTags():
12648 if self.re.search(tag):
12649 results.append((path, tag))
12653 class LUTagsSet(TagsLU):
12654 """Sets a tag on a given object.
12659 def CheckPrereq(self):
12660 """Check prerequisites.
12662 This checks the type and length of the tag name and value.
12665 TagsLU.CheckPrereq(self)
12666 for tag in self.op.tags:
12667 objects.TaggableObject.ValidateTag(tag)
12669 def Exec(self, feedback_fn):
12674 for tag in self.op.tags:
12675 self.target.AddTag(tag)
12676 except errors.TagError, err:
12677 raise errors.OpExecError("Error while setting tag: %s" % str(err))
12678 self.cfg.Update(self.target, feedback_fn)
12681 class LUTagsDel(TagsLU):
12682 """Delete a list of tags from a given object.
12687 def CheckPrereq(self):
12688 """Check prerequisites.
12690 This checks that we have the given tag.
12693 TagsLU.CheckPrereq(self)
12694 for tag in self.op.tags:
12695 objects.TaggableObject.ValidateTag(tag)
12696 del_tags = frozenset(self.op.tags)
12697 cur_tags = self.target.GetTags()
12699 diff_tags = del_tags - cur_tags
12701 diff_names = ("'%s'" % i for i in sorted(diff_tags))
12702 raise errors.OpPrereqError("Tag(s) %s not found" %
12703 (utils.CommaJoin(diff_names), ),
12704 errors.ECODE_NOENT)
12706 def Exec(self, feedback_fn):
12707 """Remove the tag from the object.
12710 for tag in self.op.tags:
12711 self.target.RemoveTag(tag)
12712 self.cfg.Update(self.target, feedback_fn)
12715 class LUTestDelay(NoHooksLU):
12716 """Sleep for a specified amount of time.
12718 This LU sleeps on the master and/or nodes for a specified amount of
12724 def ExpandNames(self):
12725 """Expand names and set required locks.
12727 This expands the node list, if any.
12730 self.needed_locks = {}
12731 if self.op.on_nodes:
12732 # _GetWantedNodes can be used here, but is not always appropriate to use
12733 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
12734 # more information.
12735 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
12736 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
12738 def _TestDelay(self):
12739 """Do the actual sleep.
12742 if self.op.on_master:
12743 if not utils.TestDelay(self.op.duration):
12744 raise errors.OpExecError("Error during master delay test")
12745 if self.op.on_nodes:
12746 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
12747 for node, node_result in result.items():
12748 node_result.Raise("Failure during rpc call to node %s" % node)
12750 def Exec(self, feedback_fn):
12751 """Execute the test delay opcode, with the wanted repetitions.
12754 if self.op.repeat == 0:
12757 top_value = self.op.repeat - 1
12758 for i in range(self.op.repeat):
12759 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
12763 class LUTestJqueue(NoHooksLU):
12764 """Utility LU to test some aspects of the job queue.
12769 # Must be lower than default timeout for WaitForJobChange to see whether it
12770 # notices changed jobs
12771 _CLIENT_CONNECT_TIMEOUT = 20.0
12772 _CLIENT_CONFIRM_TIMEOUT = 60.0
12775 def _NotifyUsingSocket(cls, cb, errcls):
12776 """Opens a Unix socket and waits for another program to connect.
12779 @param cb: Callback to send socket name to client
12780 @type errcls: class
12781 @param errcls: Exception class to use for errors
12784 # Using a temporary directory as there's no easy way to create temporary
12785 # sockets without writing a custom loop around tempfile.mktemp and
12787 tmpdir = tempfile.mkdtemp()
12789 tmpsock = utils.PathJoin(tmpdir, "sock")
12791 logging.debug("Creating temporary socket at %s", tmpsock)
12792 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
12797 # Send details to client
12800 # Wait for client to connect before continuing
12801 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
12803 (conn, _) = sock.accept()
12804 except socket.error, err:
12805 raise errcls("Client didn't connect in time (%s)" % err)
12809 # Remove as soon as client is connected
12810 shutil.rmtree(tmpdir)
12812 # Wait for client to close
12815 # pylint: disable=E1101
12816 # Instance of '_socketobject' has no ... member
12817 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
12819 except socket.error, err:
12820 raise errcls("Client failed to confirm notification (%s)" % err)
12824 def _SendNotification(self, test, arg, sockname):
12825 """Sends a notification to the client.
12828 @param test: Test name
12829 @param arg: Test argument (depends on test)
12830 @type sockname: string
12831 @param sockname: Socket path
12834 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
12836 def _Notify(self, prereq, test, arg):
12837 """Notifies the client of a test.
12840 @param prereq: Whether this is a prereq-phase test
12842 @param test: Test name
12843 @param arg: Test argument (depends on test)
12847 errcls = errors.OpPrereqError
12849 errcls = errors.OpExecError
12851 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
12855 def CheckArguments(self):
12856 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
12857 self.expandnames_calls = 0
12859 def ExpandNames(self):
12860 checkargs_calls = getattr(self, "checkargs_calls", 0)
12861 if checkargs_calls < 1:
12862 raise errors.ProgrammerError("CheckArguments was not called")
12864 self.expandnames_calls += 1
12866 if self.op.notify_waitlock:
12867 self._Notify(True, constants.JQT_EXPANDNAMES, None)
12869 self.LogInfo("Expanding names")
12871 # Get lock on master node (just to get a lock, not for a particular reason)
12872 self.needed_locks = {
12873 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
12876 def Exec(self, feedback_fn):
12877 if self.expandnames_calls < 1:
12878 raise errors.ProgrammerError("ExpandNames was not called")
12880 if self.op.notify_exec:
12881 self._Notify(False, constants.JQT_EXEC, None)
12883 self.LogInfo("Executing")
12885 if self.op.log_messages:
12886 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
12887 for idx, msg in enumerate(self.op.log_messages):
12888 self.LogInfo("Sending log message %s", idx + 1)
12889 feedback_fn(constants.JQT_MSGPREFIX + msg)
12890 # Report how many test messages have been sent
12891 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
12894 raise errors.OpExecError("Opcode failure was requested")
12899 class IAllocator(object):
12900 """IAllocator framework.
12902 An IAllocator instance has four sets of attributes:
12903 - cfg that is needed to query the cluster
12904 - input data (all members of the _KEYS class attribute are required)
12905 - four buffer attributes (in|out_data|text), that represent the
12906 input (to the external script) in text and data structure format,
12907 and the output from it, again in two formats
12908 - the result variables from the script (success, info, nodes) for
12912 # pylint: disable=R0902
12913 # lots of instance attributes
12915 def __init__(self, cfg, rpc, mode, **kwargs):
12918 # init buffer variables
12919 self.in_text = self.out_text = self.in_data = self.out_data = None
12920 # init all input fields so that pylint is happy
12922 self.memory = self.disks = self.disk_template = None
12923 self.os = self.tags = self.nics = self.vcpus = None
12924 self.hypervisor = None
12925 self.relocate_from = None
12927 self.instances = None
12928 self.evac_mode = None
12929 self.target_groups = []
12931 self.required_nodes = None
12932 # init result fields
12933 self.success = self.info = self.result = None
    try:
      (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
    except KeyError:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    keyset = [n for (n, _) in keydata]
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
12953 self._BuildInputData(compat.partial(fn, self), keydata)
12955 def _ComputeClusterData(self):
12956 """Compute the generic allocator input data.
12958 This is the data that is independent of the actual operation.
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
12971 ninfo = cfg.GetAllNodesInfo()
12972 iinfo = cfg.GetAllInstancesInfo().values()
12973 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
12976 node_list = [n.name for n in ninfo.values() if n.vm_capable]
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    else:
      hypervisor_name = cluster_info.enabled_hypervisors[0]
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
12991 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
12993 config_ndata = self._ComputeBasicNodeData(ninfo)
12994 data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
12995 i_list, config_ndata)
12996 assert len(data["nodes"]) == len(ninfo), \
12997 "Incomplete node data computed"
12999 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
13001 self.in_data = data
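    # Rough shape of the structure built above (a sketch with keys abbreviated;
    # the authoritative definition is the code above):
    #
    #   self.in_data = {
    #     "version": ..., "cluster_name": ..., "cluster_tags": [...],
    #     "enabled_hypervisors": [...],
    #     "nodegroups": {group_uuid: {"name": ..., "alloc_policy": ...}, ...},
    #     "nodes": {node_name: {... static and dynamic node data ...}, ...},
    #     "instances": {instance_name: {... instance data ...}, ...},
    #   }
    #
    # _BuildInputData later adds the mode-specific "request" key before the
    # structure is serialized for the external script.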
13004 def _ComputeNodeGroupData(cfg):
13005 """Compute node groups data.
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
    return ng
13017 def _ComputeBasicNodeData(node_cfg):
13018 """Compute global node data.
    @returns: a dict of node name: node data dict
13024 # fill in static (config-based) values
13025 node_results = dict((ninfo.name, {
13026 "tags": list(ninfo.GetTags()),
13027 "primary_ip": ninfo.primary_ip,
13028 "secondary_ip": ninfo.secondary_ip,
13029 "offline": ninfo.offline,
13030 "drained": ninfo.drained,
13031 "master_candidate": ninfo.master_candidate,
13032 "group": ninfo.group,
13033 "master_capable": ninfo.master_capable,
13034 "vm_capable": ninfo.vm_capable,
13036 for ninfo in node_cfg.values())
13038 return node_results
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute the dynamic (runtime) node data on top of the static node data.
13045 @param node_results: the basic node structures as filled from the config
13048 # make a copy of the current dict
13049 node_results = dict(node_results)
13050 for nname, nresult in node_data.items():
13051 assert nname in node_results, "Missing basic data for node %s" % nname
13052 ninfo = node_cfg[nname]
13054 if not (ninfo.offline or ninfo.drained):
13055 nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload
13060 for attr in ["memory_total", "memory_free", "memory_dom0",
13061 "vg_size", "vg_free", "cpu_total"]:
13062 if attr not in remote_info:
13063 raise errors.OpExecError("Node '%s' didn't return attribute"
13064 " '%s'" % (nname, attr))
13065 if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
13069 # compute memory used by primary instances
13070 i_p_mem = i_p_up_mem = 0
13071 for iinfo, beinfo in i_list:
13072 if iinfo.primary_node == nname:
13073 i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info["memory_free"] -= max(0, i_mem_diff)
            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]
        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info["memory_total"],
13087 "reserved_memory": remote_info["memory_dom0"],
13088 "free_memory": remote_info["memory_free"],
13089 "total_disk": remote_info["vg_size"],
13090 "free_disk": remote_info["vg_free"],
13091 "total_cpus": remote_info["cpu_total"],
13092 "i_pri_memory": i_p_mem,
13093 "i_pri_up_memory": i_p_up_mem,
13095 pnr_dyn.update(node_results[nname])
13096 node_results[nname] = pnr_dyn
13098 return node_results
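  # Illustrative example of a single merged node entry (values invented):
  #
  #   node_results["node1.example.com"] = {
  #     # static part, from _ComputeBasicNodeData
  #     "tags": [], "primary_ip": "192.0.2.1", "offline": False, ...
  #     # dynamic part, from the RPC results processed above
  #     "total_memory": 32768, "free_memory": 20480, "reserved_memory": 1024,
  #     "total_disk": 512000, "free_disk": 256000, "total_cpus": 8,
  #     "i_pri_memory": 8192, "i_pri_up_memory": 4096,
  #   }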
13101 def _ComputeInstanceData(cluster_info, i_list):
13102 """Compute global instance data.
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
13120 "tags": list(iinfo.GetTags()),
13121 "admin_up": iinfo.admin_up,
13122 "vcpus": beinfo[constants.BE_VCPUS],
13123 "memory": beinfo[constants.BE_MEMORY],
13125 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
13127 "disks": [{constants.IDISK_SIZE: dsk.size,
13128 constants.IDISK_MODE: dsk.mode}
13129 for dsk in iinfo.disks],
13130 "disk_template": iinfo.disk_template,
13131 "hypervisor": iinfo.hypervisor,
13133 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
13135 instance_data[iinfo.name] = pir
13137 return instance_data
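  # Illustrative example of one instance entry (values invented):
  #
  #   instance_data["instance1.example.com"] = {
  #     "tags": [], "admin_up": True, "vcpus": 2, "memory": 2048,
  #     "nodes": ["node1.example.com", "node2.example.com"],
  #     "disks": [{constants.IDISK_SIZE: 10240, constants.IDISK_MODE: "rw"}],
  #     "disk_template": "drbd", "hypervisor": "xen-pvm",
  #     "disk_space_total": ...,  # filled in via _ComputeDiskSize
  #   }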
13139 def _AddNewInstance(self):
13140 """Add new instance data to allocator structure.
    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.
13145 The checks for the completeness of the opcode must have already been
13149 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
13151 if self.disk_template in constants.DTS_INT_MIRROR:
13152 self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "disk_template": self.disk_template,
13161 "vcpus": self.vcpus,
13162 "memory": self.memory,
13163 "disks": self.disks,
13164 "disk_space_total": disk_space,
13166 "required_nodes": self.required_nodes,
13167 "hypervisor": self.hypervisor,
13172 def _AddRelocateInstance(self):
13173 """Add relocate instance data to allocator structure.
    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.
13178 The checks for the completeness of the opcode must have already been
13182 instance = self.cfg.GetInstanceInfo(self.name)
13183 if instance is None:
13184 raise errors.ProgrammerError("Unknown instance '%s' passed to"
13185 " IAllocator" % self.name)
13187 if instance.disk_template not in constants.DTS_MIRRORED:
13188 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
13189 errors.ECODE_INVAL)
13191 if instance.disk_template in constants.DTS_INT_MIRROR and \
13192 len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)
13196 self.required_nodes = 1
13197 disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
13198 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
13202 "disk_space_total": disk_space,
13203 "required_nodes": self.required_nodes,
13204 "relocate_from": self.relocate_from,
13208 def _AddNodeEvacuate(self):
13209 """Get data for node-evacuate requests.
13213 "instances": self.instances,
13214 "evac_mode": self.evac_mode,
13217 def _AddChangeGroup(self):
13218 """Get data for node-evacuate requests.
13222 "instances": self.instances,
13223 "target_groups": self.target_groups,
13226 def _BuildInputData(self, fn, keydata):
13227 """Build input data structures.
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
13234 for keyname, keytype in keydata:
13235 if keyname not in request:
        raise errors.ProgrammerError("Request parameter %s is missing" %
                                     keyname)
      val = request[keyname]
13239 if not keytype(val):
13240 raise errors.ProgrammerError("Request parameter %s doesn't pass"
13241 " validation, value %s, expected"
13242 " type %s" % (keyname, val, keytype))
13243 self.in_data["request"] = request
13245 self.in_text = serializer.Dump(self.in_data)
13247 _STRING_LIST = ht.TListOf(ht.TString)
13248 _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
13249 # pylint: disable=E1101
13250 # Class '...' has no 'OP_ID' member
13251 "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
13252 opcodes.OpInstanceMigrate.OP_ID,
13253 opcodes.OpInstanceReplaceDisks.OP_ID])
13257 ht.TListOf(ht.TAnd(ht.TIsLength(3),
13258 ht.TItems([ht.TNonEmptyString,
13259 ht.TNonEmptyString,
13260 ht.TListOf(ht.TNonEmptyString),
13263 ht.TListOf(ht.TAnd(ht.TIsLength(2),
13264 ht.TItems([ht.TNonEmptyString,
13267 _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
13268 ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
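  # For reference, a node-evacuate result accepted by _NEVAC_RESULT would look
  # roughly like this (an invented sketch, not real allocator output):
  #
  #   ([["inst1", "target-group", ["node3"]]],        # moved instances
  #    [["inst2", "not enough memory"]],              # failed instances
  #    [[{"OP_ID": "OP_INSTANCE_MIGRATE", ...}]])     # jobs (lists of opcodes)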
  _MODE_DATA = {
    constants.IALLOCATOR_MODE_ALLOC:
      (_AddNewInstance, [
        ("name", ht.TString),
        ("memory", ht.TInt),
        ("disks", ht.TListOf(ht.TDict)),
        ("disk_template", ht.TString),
        ("os", ht.TString),
        ("tags", _STRING_LIST),
        ("nics", ht.TListOf(ht.TDict)),
        ("vcpus", ht.TInt),
        ("hypervisor", ht.TString),
        ], ht.TList),
    constants.IALLOCATOR_MODE_RELOC:
      (_AddRelocateInstance,
       [("name", ht.TString), ("relocate_from", _STRING_LIST)],
       ht.TList),
    constants.IALLOCATOR_MODE_NODE_EVAC:
      (_AddNodeEvacuate, [
        ("instances", _STRING_LIST),
        ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
        ], _NEVAC_RESULT),
    constants.IALLOCATOR_MODE_CHG_GROUP:
      (_AddChangeGroup, [
        ("instances", _STRING_LIST),
        ("target_groups", _STRING_LIST),
        ], _NEVAC_RESULT),
    }
13300 def Run(self, name, validate=True, call_fn=None):
13301 """Run an instance allocator and return the results.
13304 if call_fn is None:
13305 call_fn = self.rpc.call_iallocator_runner
13307 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
13308 result.Raise("Failure while running the iallocator script")
    self.out_text = result.payload
    if validate:
      self._ValidateResult()
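  # Note: the call_fn parameter exists so the RPC call can be substituted,
  # e.g. by a stub in tests; whatever is passed in must behave like the normal
  # RPC result object (support .Raise() and expose .payload). A hypothetical
  # sketch: ial.Run("hail", call_fn=my_stub_runner). In normal operation the
  # method is simply invoked as ial.Run(allocator_name).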
13314 def _ValidateResult(self):
13315 """Process the allocator results.
13317 This will process and if successful save the result in
13318 self.out_data and the other parameters.
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
13326 if not isinstance(rdict, dict):
13327 raise errors.OpExecError("Can't parse iallocator results: not a dict")
    # TODO: remove backwards compatibility in later versions
13330 if "nodes" in rdict and "result" not in rdict:
13331 rdict["result"] = rdict["nodes"]
13334 for key in "success", "info", "result":
13335 if key not in rdict:
13336 raise errors.OpExecError("Can't parse iallocator results:"
13337 " missing key '%s'" % key)
13338 setattr(self, key, rdict[key])
13340 if not self._result_check(self.result):
13341 raise errors.OpExecError("Iallocator returned invalid result,"
13342 " expected %s, got %s" %
13343 (self._result_check, self.result),
13344 errors.ECODE_INVAL)
13346 if self.mode == constants.IALLOCATOR_MODE_RELOC:
13347 assert self.relocate_from is not None
13348 assert self.required_nodes == 1
13350 node2group = dict((name, ndata["group"])
13351 for (name, ndata) in self.in_data["nodes"].items())
13353 fn = compat.partial(self._NodesToGroups, node2group,
13354 self.in_data["nodegroups"])
13356 instance = self.cfg.GetInstanceInfo(self.name)
13357 request_groups = fn(self.relocate_from + [instance.primary_node])
13358 result_groups = fn(rdict["result"] + [instance.primary_node])
13360 if self.success and not set(result_groups).issubset(request_groups):
13361 raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
13362 " differ from original groups (%s)" %
13363 (utils.CommaJoin(result_groups),
13364 utils.CommaJoin(request_groups)))
13366 elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
13367 assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
13369 self.out_data = rdict
13372 def _NodesToGroups(node2group, groups, nodes):
13373 """Returns a list of unique group names for a list of nodes.
13375 @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list of strings
    @param nodes: Node names
    result = set()
    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        continue
      try:
        group_name = groups[group_uuid]["name"]
      except KeyError:
        # Can't find group, let's use UUID
        group_name = group_uuid
      result.add(group_name)
    return sorted(result)
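  # Worked example (illustrative values; in _ValidateResult this helper is
  # bound via compat.partial to the current node and group maps):
  #
  #   _NodesToGroups({"n1": "uuid-a", "n2": "uuid-b"},
  #                  {"uuid-a": {"name": "default"}},
  #                  ["n1", "n2", "unknown-node"])
  #   -> ["default", "uuid-b"]
  #
  # "unknown-node" is skipped, and "uuid-b", having no group data, falls back
  # to its UUID.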
13405 class LUTestAllocator(NoHooksLU):
13406 """Run allocator tests.
  This LU runs the allocator tests.
13411 def CheckPrereq(self):
13412 """Check prerequisites.
    This checks the opcode parameters depending on the direction and mode of
    the test.
13417 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
13418 for attr in ["memory", "disks", "disk_template",
13419 "os", "tags", "nics", "vcpus"]:
13420 if not hasattr(self.op, attr):
13421 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
13422 attr, errors.ECODE_INVAL)
13423 iname = self.cfg.ExpandInstanceName(self.op.name)
13424 if iname is not None:
13425 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
13426 iname, errors.ECODE_EXISTS)
13427 if not isinstance(self.op.nics, list):
13428 raise errors.OpPrereqError("Invalid parameter 'nics'",
13429 errors.ECODE_INVAL)
13430 if not isinstance(self.op.disks, list):
13431 raise errors.OpPrereqError("Invalid parameter 'disks'",
13432 errors.ECODE_INVAL)
13433 for row in self.op.disks:
13434 if (not isinstance(row, dict) or
13435 constants.IDISK_SIZE not in row or
13436 not isinstance(row[constants.IDISK_SIZE], int) or
13437 constants.IDISK_MODE not in row or
13438 row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
13439 raise errors.OpPrereqError("Invalid contents of the 'disks'"
13440 " parameter", errors.ECODE_INVAL)
13441 if self.op.hypervisor is None:
13442 self.op.hypervisor = self.cfg.GetHypervisorType()
13443 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
13444 fname = _ExpandInstanceName(self.cfg, self.op.name)
13445 self.op.name = fname
13446 self.relocate_from = \
13447 list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
13448 elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
13449 constants.IALLOCATOR_MODE_NODE_EVAC):
13450 if not self.op.instances:
13451 raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
      self.op.instances = _GetWantedInstances(self, self.op.instances)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
13455 self.op.mode, errors.ECODE_INVAL)
13457 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
13458 if self.op.allocator is None:
13459 raise errors.OpPrereqError("Missing allocator name",
13460 errors.ECODE_INVAL)
13461 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
13462 raise errors.OpPrereqError("Wrong allocator test '%s'" %
13463 self.op.direction, errors.ECODE_INVAL)
13465 def Exec(self, feedback_fn):
13466 """Run the allocator test.
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       memory=self.op.memory,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       target_groups=self.op.target_groups)
    elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       evac_mode=self.op.evac_mode)
    else:
      raise errors.ProgrammerError("Unhandled mode '%s' in"
                                   " LUTestAllocator.Exec" % self.op.mode)
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  }
13518 assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
13521 def _GetQueryImplementation(name):
13522 """Returns the implemtnation for a query type.
13524 @param name: Query type, must be one of L{constants.QR_VIA_OP}
13528 return _QUERY_IMPL[name]
13530 raise errors.OpPrereqError("Unknown query resource '%s'" % name,
13531 errors.ECODE_INVAL)
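# Example: _GetQueryImplementation(constants.QR_NODE) returns the _NodeQuery
# class registered in _QUERY_IMPL above; unknown resource names raise
# OpPrereqError instead of leaking a bare KeyError to the caller.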