4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module implementing the master-side code."""
24 # pylint: disable=W0201,C0302
26 # W0201 since most LU attributes are defined in CheckPrereq or similar
29 # C0302: since we have waaaay too many lines in this module
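# Standard-library and third-party modules used by the code below
import re
import logging
import copy
import itertools

import OpenSSL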
44 from ganeti import ssh
45 from ganeti import utils
46 from ganeti import errors
47 from ganeti import hypervisor
48 from ganeti import locking
49 from ganeti import constants
50 from ganeti import objects
51 from ganeti import serializer
52 from ganeti import ssconf
53 from ganeti import uidpool
54 from ganeti import compat
55 from ganeti import masterd
56 from ganeti import netutils
57 from ganeti import query
58 from ganeti import qlang
59 from ganeti import opcodes
61 from ganeti import rpc
62 from ganeti import runtime
64 import ganeti.masterd.instance # pylint: disable=W0611
67 #: Size of DRBD meta block device
71 INSTANCE_DOWN = [constants.ADMINST_DOWN]
72 INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
73 INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
75 #: Instance status in which an instance can be marked as offline/online
76 CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
77 constants.ADMINST_OFFLINE,
82 """Data container for LU results with jobs.
84 Instances of this class returned from L{LogicalUnit.Exec} will be recognized
85 by L{mcpu._ProcessResult}. The latter will then submit the jobs
86 contained in the C{jobs} attribute and include the job IDs in the opcode
90 def __init__(self, jobs, **kwargs):
91 """Initializes this class.
93 Additional return values can be specified as keyword arguments.
95 @type jobs: list of lists of L{opcodes.OpCode}
96 @param jobs: A list of lists of opcode objects
103 class LogicalUnit(object):
104 """Logical Unit base class.
106 Subclasses must follow these rules:
107 - implement ExpandNames
108 - implement CheckPrereq (except when tasklets are used)
109 - implement Exec (except when tasklets are used)
110 - implement BuildHooksEnv
111 - implement BuildHooksNodes
112 - redefine HPATH and HTYPE
113 - optionally redefine their run requirements:
114 REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
116 Note that all commands require root permissions.
118 @ivar dry_run_result: the value (if any) that will be returned to the caller
119 in dry-run mode (signalled by opcode dry_run parameter)
126 def __init__(self, processor, op, context, rpc_runner):
127 """Constructor for LogicalUnit.
129 This needs to be overridden in derived classes in order to check op validity.
133 self.proc = processor
135 self.cfg = context.cfg
136 self.glm = context.glm
138 self.owned_locks = context.glm.list_owned
139 self.context = context
140 self.rpc = rpc_runner
141 # Dicts used to declare locking needs to mcpu
142 self.needed_locks = None
143 self.share_locks = dict.fromkeys(locking.LEVELS, 0)
145 self.remove_locks = {}
146 # Used to force good behavior when calling helper functions
147 self.recalculate_locks = {}
149 self.Log = processor.Log # pylint: disable=C0103
150 self.LogWarning = processor.LogWarning # pylint: disable=C0103
151 self.LogInfo = processor.LogInfo # pylint: disable=C0103
152 self.LogStep = processor.LogStep # pylint: disable=C0103
153 # support for dry-run
154 self.dry_run_result = None
155 # support for generic debug attribute
156 if (not hasattr(self.op, "debug_level") or
157 not isinstance(self.op.debug_level, int)):
158 self.op.debug_level = 0
163 # Validate opcode parameters and set defaults
164 self.op.Validate(True)
166 self.CheckArguments()
168 def CheckArguments(self):
169 """Check syntactic validity for the opcode arguments.
171 This method is for doing a simple syntactic check and ensuring
172 validity of opcode parameters, without any cluster-related
173 checks. While the same can be accomplished in ExpandNames and/or
174 CheckPrereq, doing these separately is better because:
176 - ExpandNames is left purely as a lock-related function
177 - CheckPrereq is run after we have acquired locks (and possible
180 The function is allowed to change the self.op attribute so that
181 later methods no longer need to worry about missing parameters.
186 def ExpandNames(self):
187 """Expand names for this LU.
189 This method is called before starting to execute the opcode, and it should
190 update all the parameters of the opcode to their canonical form (e.g. a
191 short node name must be fully expanded after this method has successfully
192 completed). This way locking, hooks, logging, etc. can work correctly.
194 LUs which implement this method must also populate the self.needed_locks
195 member, as a dict with lock levels as keys, and a list of needed lock names
198 - use an empty dict if you don't need any lock
199 - if you don't need any lock at a particular level omit that
200 level (note that in this case C{DeclareLocks} won't be called
201 at all for that level)
202 - if you need locks at a level, but you can't calculate it in
203 this function, initialise that level with an empty list and do
204 further processing in L{LogicalUnit.DeclareLocks} (see that
205 function's docstring)
206 - don't put anything for the BGL level
207 - if you want all locks at a level use L{locking.ALL_SET} as a value
209 If you need to share locks (rather than acquire them exclusively) at one
210 level you can modify self.share_locks, setting a true value (usually 1) for
211 that level. By default locks are not shared.
213 This function can also define a list of tasklets, which then will be
214 executed in order instead of the usual LU-level CheckPrereq and Exec
215 functions, if those are not defined by the LU.
219 # Acquire all nodes and one instance
220 self.needed_locks = {
221 locking.LEVEL_NODE: locking.ALL_SET,
222 locking.LEVEL_INSTANCE: ['instance1.example.com'],
224 # Acquire just two nodes
225 self.needed_locks = {
226 locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
229 self.needed_locks = {} # No, you can't leave it to the default value None
232 # The implementation of this method is mandatory only if the new LU is
233 # concurrent, so that old LUs don't need to be changed all at the same time.
236 self.needed_locks = {} # Exclusive LUs don't need locks.
238 raise NotImplementedError
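# Illustrative sketch (hypothetical LU, not part of this module): lock one
# instance exclusively, defer the node locks to DeclareLocks and share them:
#
#   def ExpandNames(self):
#     self._ExpandAndLockInstance()
#     self.needed_locks[locking.LEVEL_NODE] = []
#     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#     self.share_locks[locking.LEVEL_NODE] = 1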
240 def DeclareLocks(self, level):
241 """Declare LU locking needs for a level
243 While most LUs can just declare their locking needs at ExpandNames time,
244 sometimes there's the need to calculate some locks after having acquired
245 the ones before. This function is called just before acquiring locks at a
246 particular level, but after acquiring the ones at lower levels, and permits
247 such calculations. It can be used to modify self.needed_locks, and by
248 default it does nothing.
250 This function is only called if you have something already set in
251 self.needed_locks for the level.
253 @param level: Locking level which is going to be locked
254 @type level: member of L{ganeti.locking.LEVELS}
258 def CheckPrereq(self):
259 """Check prerequisites for this LU.
261 This method should check that the prerequisites for the execution
262 of this LU are fulfilled. It can do internode communication, but
263 it should be idempotent - no cluster or system changes are
266 The method should raise errors.OpPrereqError in case something is
267 not fulfilled. Its return value is ignored.
269 This method should also update all the parameters of the opcode to
270 their canonical form if it hasn't been done by ExpandNames before.
273 if self.tasklets is not None:
274 for (idx, tl) in enumerate(self.tasklets):
275 logging.debug("Checking prerequisites for tasklet %s/%s",
276 idx + 1, len(self.tasklets))
281 def Exec(self, feedback_fn):
284 This method should implement the actual work. It should raise
285 errors.OpExecError for failures that are somewhat dealt with in
289 if self.tasklets is not None:
290 for (idx, tl) in enumerate(self.tasklets):
291 logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
294 raise NotImplementedError
296 def BuildHooksEnv(self):
297 """Build hooks environment for this LU.
300 @return: Dictionary containing the environment that will be used for
301 running the hooks for this LU. The keys of the dict must not be prefixed
302 with "GANETI_"--that'll be added by the hooks runner. The hooks runner
303 will extend the environment with additional variables. If no environment
304 should be defined, an empty dictionary should be returned (not C{None}).
305 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
309 raise NotImplementedError
311 def BuildHooksNodes(self):
312 """Build list of nodes to run LU's hooks.
314 @rtype: tuple; (list, list)
315 @return: Tuple containing a list of node names on which the hook
316 should run before the execution and a list of node names on which the
317 hook should run after the execution. If there are no such nodes, return an
318 empty list (and not None).
319 @note: If the C{HPATH} attribute of the LU class is C{None}, this function
323 raise NotImplementedError
325 def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
326 """Notify the LU about the results of its hooks.
328 This method is called every time a hooks phase is executed, and notifies
329 the Logical Unit about the hooks' result. The LU can then use it to alter
330 its result based on the hooks. By default the method does nothing and the
331 previous result is passed back unchanged but any LU can define it if it
332 wants to use the local cluster hook-scripts somehow.
334 @param phase: one of L{constants.HOOKS_PHASE_POST} or
335 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
336 @param hook_results: the results of the multi-node hooks rpc call
337 @param feedback_fn: function used to send feedback back to the caller
338 @param lu_result: the previous Exec result this LU had, or None
340 @return: the new Exec result, based on the previous result
344 # API must be kept, thus we ignore the unused-argument and
345 # could-be-a-function pylint warnings
346 # pylint: disable=W0613,R0201
349 def _ExpandAndLockInstance(self):
350 """Helper function to expand and lock an instance.
352 Many LUs that work on an instance take its name in self.op.instance_name
353 and need to expand it and then declare the expanded name for locking. This
354 function does it, and then updates self.op.instance_name to the expanded
355 name. It also initializes needed_locks as a dict, if this hasn't been done
359 if self.needed_locks is None:
360 self.needed_locks = {}
362 assert locking.LEVEL_INSTANCE not in self.needed_locks, \
363 "_ExpandAndLockInstance called with instance-level locks set"
364 self.op.instance_name = _ExpandInstanceName(self.cfg,
365 self.op.instance_name)
366 self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
368 def _LockInstancesNodes(self, primary_only=False,
369 level=locking.LEVEL_NODE):
370 """Helper function to declare instances' nodes for locking.
372 This function should be called after locking one or more instances to lock
373 their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
374 with all primary or secondary nodes for instances already locked and
375 present in self.needed_locks[locking.LEVEL_INSTANCE].
377 It should be called from DeclareLocks, and for safety only works if
378 self.recalculate_locks[locking.LEVEL_NODE] is set.
380 In the future it may grow parameters to just lock some instance's nodes, or
381 to just lock primaries or secondary nodes, if needed.
383 It should be called in DeclareLocks in a way similar to::
385 if level == locking.LEVEL_NODE:
386 self._LockInstancesNodes()
388 @type primary_only: boolean
389 @param primary_only: only lock primary nodes of locked instances
390 @param level: Which lock level to use for locking nodes
393 assert level in self.recalculate_locks, \
394 "_LockInstancesNodes helper function called with no nodes to recalculate"
396 # TODO: check if we've really been called with the instance locks held
398 # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
399 # future we might want to have different behaviors depending on the value
400 # of self.recalculate_locks[locking.LEVEL_NODE]
402 locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
403 for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
404 wanted_nodes.append(instance.primary_node)
406 wanted_nodes.extend(instance.secondary_nodes)
408 if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
409 self.needed_locks[level] = wanted_nodes
410 elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
411 self.needed_locks[level].extend(wanted_nodes)
413 raise errors.ProgrammerError("Unknown recalculation mode")
415 del self.recalculate_locks[level]
418 class NoHooksLU(LogicalUnit): # pylint: disable=W0223
419 """Simple LU which runs no hooks.
421 This LU is intended as a parent for other LogicalUnits which will
422 run no hooks, in order to reduce duplicate code.
428 def BuildHooksEnv(self):
429 """Empty BuildHooksEnv for NoHooksLu.
431 This just raises an error.
434 raise AssertionError("BuildHooksEnv called for NoHooksLUs")
436 def BuildHooksNodes(self):
437 """Empty BuildHooksNodes for NoHooksLU.
440 raise AssertionError("BuildHooksNodes called for NoHooksLU")
444 """Tasklet base class.
446 Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
447 they can mix legacy code with tasklets. Locking needs to be done in the LU,
448 tasklets know nothing about locks.
450 Subclasses must follow these rules:
451 - Implement CheckPrereq
455 def __init__(self, lu):
462 def CheckPrereq(self):
463 """Check prerequisites for this tasklets.
465 This method should check whether the prerequisites for the execution of
466 this tasklet are fulfilled. It can do internode communication, but it
467 should be idempotent - no cluster or system changes are allowed.
469 The method should raise errors.OpPrereqError in case something is not
470 fulfilled. Its return value is ignored.
472 This method should also update all parameters to their canonical form if it
473 hasn't been done before.
478 def Exec(self, feedback_fn):
479 """Execute the tasklet.
481 This method should implement the actual work. It should raise
482 errors.OpExecError for failures that are somewhat dealt with in code, or
486 raise NotImplementedError
490 """Base for query utility classes.
493 #: Attribute holding field definitions
499 def __init__(self, qfilter, fields, use_locking):
500 """Initializes this class.
503 self.use_locking = use_locking
505 self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
506 namefield=self.SORT_FIELD)
507 self.requested_data = self.query.RequestedData()
508 self.names = self.query.RequestedNames()
510 # Sort only if no names were requested
511 self.sort_by_name = not self.names
513 self.do_locking = None
516 def _GetNames(self, lu, all_names, lock_level):
517 """Helper function to determine names asked for in the query.
521 names = lu.owned_locks(lock_level)
525 if self.wanted == locking.ALL_SET:
526 assert not self.names
527 # caller didn't specify names, so ordering is not important
528 return utils.NiceSort(names)
530 # caller specified names and we must keep the same order
532 assert not self.do_locking or lu.glm.is_owned(lock_level)
534 missing = set(self.wanted).difference(names)
536 raise errors.OpExecError("Some items were removed before retrieving"
537 " their data: %s" % missing)
539 # Return expanded names
542 def ExpandNames(self, lu):
543 """Expand names for this query.
545 See L{LogicalUnit.ExpandNames}.
548 raise NotImplementedError()
550 def DeclareLocks(self, lu, level):
551 """Declare locks for this query.
553 See L{LogicalUnit.DeclareLocks}.
556 raise NotImplementedError()
558 def _GetQueryData(self, lu):
559 """Collects all data for this query.
561 @return: Query data object
564 raise NotImplementedError()
566 def NewStyleQuery(self, lu):
567 """Collect data and execute query.
570 return query.GetQueryResponse(self.query, self._GetQueryData(lu),
571 sort_by_name=self.sort_by_name)
573 def OldStyleQuery(self, lu):
574 """Collect data and execute query.
577 return self.query.OldStyleQuery(self._GetQueryData(lu),
578 sort_by_name=self.sort_by_name)
582 """Returns a dict declaring all lock levels shared.
585 return dict.fromkeys(locking.LEVELS, 1)
588 def _MakeLegacyNodeInfo(data):
589 """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
591 Converts the data into a single dictionary. This is fine for most use cases,
592 but some require information from more than one volume group or hypervisor.
595 (bootid, (vg_info, ), (hv_info, )) = data
597 return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
602 def _AnnotateDiskParams(instance, devs, cfg):
603 """Little helper wrapper to the rpc annotation method.
605 @param instance: The instance object
606 @type devs: List of L{objects.Disk}
607 @param devs: The root devices (not any of its children!)
608 @param cfg: The config object
609 @return: The annotated disk copies
610 @see L{rpc.AnnotateDiskParams}
613 return rpc.AnnotateDiskParams(instance.disk_template, devs,
614 cfg.GetInstanceDiskParams(instance))
617 def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
619 """Checks if node groups for locked instances are still correct.
621 @type cfg: L{config.ConfigWriter}
622 @param cfg: Cluster configuration
623 @type instances: dict; string as key, L{objects.Instance} as value
624 @param instances: Dictionary, instance name as key, instance object as value
625 @type owned_groups: iterable of string
626 @param owned_groups: List of owned groups
627 @type owned_nodes: iterable of string
628 @param owned_nodes: List of owned nodes
629 @type cur_group_uuid: string or None
630 @param cur_group_uuid: Optional group UUID to check against instance's groups
633 for (name, inst) in instances.items():
634 assert owned_nodes.issuperset(inst.all_nodes), \
635 "Instance %s's nodes changed while we kept the lock" % name
637 inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
639 assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
640 "Instance %s has no node in group %s" % (name, cur_group_uuid)
643 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
644 """Checks if the owned node groups are still correct for an instance.
646 @type cfg: L{config.ConfigWriter}
647 @param cfg: The cluster configuration
648 @type instance_name: string
649 @param instance_name: Instance name
650 @type owned_groups: set or frozenset
651 @param owned_groups: List of currently owned node groups
654 inst_groups = cfg.GetInstanceNodeGroups(instance_name)
656 if not owned_groups.issuperset(inst_groups):
657 raise errors.OpPrereqError("Instance %s's node groups changed since"
658 " locks were acquired, current groups are"
659 " are '%s', owning groups '%s'; retry the"
662 utils.CommaJoin(inst_groups),
663 utils.CommaJoin(owned_groups)),
669 def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
670 """Checks if the instances in a node group are still correct.
672 @type cfg: L{config.ConfigWriter}
673 @param cfg: The cluster configuration
674 @type group_uuid: string
675 @param group_uuid: Node group UUID
676 @type owned_instances: set or frozenset
677 @param owned_instances: List of currently owned instances
680 wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
681 if owned_instances != wanted_instances:
682 raise errors.OpPrereqError("Instances in node group '%s' changed since"
683 " locks were acquired, wanted '%s', have '%s';"
684 " retry the operation" %
686 utils.CommaJoin(wanted_instances),
687 utils.CommaJoin(owned_instances)),
690 return wanted_instances
693 def _SupportsOob(cfg, node):
694 """Tells if node supports OOB.
696 @type cfg: L{config.ConfigWriter}
697 @param cfg: The cluster configuration
698 @type node: L{objects.Node}
699 @param node: The node
700 @return: The OOB script if supported or an empty string otherwise
703 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
706 def _GetWantedNodes(lu, nodes):
707 """Returns list of checked and expanded node names.
709 @type lu: L{LogicalUnit}
710 @param lu: the logical unit on whose behalf we execute
712 @param nodes: list of node names or None for all nodes
714 @return: the list of nodes, sorted
715 @raise errors.ProgrammerError: if the nodes parameter is wrong type
719 return [_ExpandNodeName(lu.cfg, name) for name in nodes]
721 return utils.NiceSort(lu.cfg.GetNodeList())
724 def _GetWantedInstances(lu, instances):
725 """Returns list of checked and expanded instance names.
727 @type lu: L{LogicalUnit}
728 @param lu: the logical unit on whose behalf we execute
729 @type instances: list
730 @param instances: list of instance names or None for all instances
732 @return: the list of instances, sorted
733 @raise errors.OpPrereqError: if the instances parameter is wrong type
734 @raise errors.OpPrereqError: if any of the passed instances is not found
738 wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
740 wanted = utils.NiceSort(lu.cfg.GetInstanceList())
744 def _GetUpdatedParams(old_params, update_dict,
745 use_default=True, use_none=False):
746 """Return the new version of a parameter dictionary.
748 @type old_params: dict
749 @param old_params: old parameters
750 @type update_dict: dict
751 @param update_dict: dict containing new parameter values, or
752 constants.VALUE_DEFAULT to reset the parameter to its default
754 @type use_default: boolean
755 @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
756 values as 'to be deleted' values
757 @type use_none: boolean
758 @param use_none: whether to recognise C{None} values as 'to be
761 @return: the new parameter dictionary
764 params_copy = copy.deepcopy(old_params)
765 for key, val in update_dict.iteritems():
766 if ((use_default and val == constants.VALUE_DEFAULT) or
767 (use_none and val is None)):
773 params_copy[key] = val
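# Behaviour sketch: with use_default=True (the default), VALUE_DEFAULT
# entries are removed while the remaining keys are updated or added, e.g.
#   _GetUpdatedParams({"a": 1, "b": 2},
#                     {"a": constants.VALUE_DEFAULT, "c": 3})
#   returns {"b": 2, "c": 3}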
777 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
778 """Return the new version of a instance policy.
780 @param group_policy: whether this policy applies to a group and thus
781 we should support removal of policy entries
784 use_none = use_default = group_policy
785 ipolicy = copy.deepcopy(old_ipolicy)
786 for key, value in new_ipolicy.items():
787 if key not in constants.IPOLICY_ALL_KEYS:
788 raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
790 if key in constants.IPOLICY_ISPECS:
791 utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
792 ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
794 use_default=use_default)
796 if not value or value == [constants.VALUE_DEFAULT]:
800 raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
801 " on the cluster'" % key,
804 if key in constants.IPOLICY_PARAMETERS:
805 # FIXME: we assume all such values are float
807 ipolicy[key] = float(value)
808 except (TypeError, ValueError), err:
809 raise errors.OpPrereqError("Invalid value for attribute"
810 " '%s': '%s', error: %s" %
811 (key, value, err), errors.ECODE_INVAL)
813 # FIXME: we assume all others are lists; this should be redone
815 ipolicy[key] = list(value)
817 objects.InstancePolicy.CheckParameterSyntax(ipolicy)
818 except errors.ConfigurationError, err:
819 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
824 def _UpdateAndVerifySubDict(base, updates, type_check):
825 """Updates and verifies a dict with sub dicts of the same type.
827 @param base: The dict with the old data
828 @param updates: The dict with the new data
829 @param type_check: Dict suitable to ForceDictType to verify correct types
830 @returns: A new dict with updated and verified values
834 new = _GetUpdatedParams(old, value)
835 utils.ForceDictType(new, type_check)
838 ret = copy.deepcopy(base)
839 ret.update(dict((key, fn(base.get(key, {}), value))
840 for key, value in updates.items()))
844 def _MergeAndVerifyHvState(op_input, obj_input):
845 """Combines the hv state from an opcode with the one of the object
847 @param op_input: The input dict from the opcode
848 @param obj_input: The input dict from the objects
849 @return: The verified and updated dict
853 invalid_hvs = set(op_input) - constants.HYPER_TYPES
855 raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
856 " %s" % utils.CommaJoin(invalid_hvs),
858 if obj_input is None:
860 type_check = constants.HVSTS_PARAMETER_TYPES
861 return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
866 def _MergeAndVerifyDiskState(op_input, obj_input):
867 """Combines the disk state from an opcode with the one of the object
869 @param op_input: The input dict from the opcode
870 @param obj_input: The input dict from the objects
871 @return: The verified and updated dict
874 invalid_dst = set(op_input) - constants.DS_VALID_TYPES
876 raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
877 utils.CommaJoin(invalid_dst),
879 type_check = constants.DSS_PARAMETER_TYPES
880 if obj_input is None:
882 return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
884 for key, value in op_input.items())
889 def _ReleaseLocks(lu, level, names=None, keep=None):
890 """Releases locks owned by an LU.
892 @type lu: L{LogicalUnit}
893 @param level: Lock level
894 @type names: list or None
895 @param names: Names of locks to release
896 @type keep: list or None
897 @param keep: Names of locks to retain
900 assert not (keep is not None and names is not None), \
901 "Only one of the 'names' and the 'keep' parameters can be given"
903 if names is not None:
904 should_release = names.__contains__
906 should_release = lambda name: name not in keep
908 should_release = None
910 owned = lu.owned_locks(level)
912 # Not owning any lock at this level, do nothing
919 # Determine which locks to release
921 if should_release(name):
926 assert len(lu.owned_locks(level)) == (len(retain) + len(release))
928 # Release just some locks
929 lu.glm.release(level, names=release)
931 assert frozenset(lu.owned_locks(level)) == frozenset(retain)
934 lu.glm.release(level)
936 assert not lu.glm.is_owned(level), "No locks should be owned"
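# Typical (illustrative) calls: keep only the node locks still needed, or
# drop an entire level once it is no longer required:
#   _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.node_name])
#   _ReleaseLocks(self, locking.LEVEL_INSTANCE)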
939 def _MapInstanceDisksToNodes(instances):
940 """Creates a map from (node, volume) to instance name.
942 @type instances: list of L{objects.Instance}
943 @rtype: dict; tuple of (node name, volume name) as key, instance name as value
946 return dict(((node, vol), inst.name)
947 for inst in instances
948 for (node, vols) in inst.MapLVsByNode().items()
952 def _RunPostHook(lu, node_name):
953 """Runs the post-hook for an opcode on a single node.
956 hm = lu.proc.BuildHooksManager(lu)
958 hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
960 # pylint: disable=W0702
961 lu.LogWarning("Errors occurred running hooks on %s" % node_name)
964 def _CheckOutputFields(static, dynamic, selected):
965 """Checks whether all selected fields are valid.
967 @type static: L{utils.FieldSet}
968 @param static: static fields set
969 @type dynamic: L{utils.FieldSet}
970 @param dynamic: dynamic fields set
977 delta = f.NonMatching(selected)
979 raise errors.OpPrereqError("Unknown output fields selected: %s"
980 % ",".join(delta), errors.ECODE_INVAL)
983 def _CheckGlobalHvParams(params):
984 """Validates that given hypervisor params are not global ones.
986 This will ensure that instances don't get customised versions of
990 used_globals = constants.HVC_GLOBALS.intersection(params)
992 msg = ("The following hypervisor parameters are global and cannot"
993 " be customized at instance level, please modify them at"
994 " cluster level: %s" % utils.CommaJoin(used_globals))
995 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
998 def _CheckNodeOnline(lu, node, msg=None):
999 """Ensure that a given node is online.
1001 @param lu: the LU on behalf of which we make the check
1002 @param node: the node to check
1003 @param msg: if passed, should be a message to replace the default one
1004 @raise errors.OpPrereqError: if the node is offline
1008 msg = "Can't use offline node"
1009 if lu.cfg.GetNodeInfo(node).offline:
1010 raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
1013 def _CheckNodeNotDrained(lu, node):
1014 """Ensure that a given node is not drained.
1016 @param lu: the LU on behalf of which we make the check
1017 @param node: the node to check
1018 @raise errors.OpPrereqError: if the node is drained
1021 if lu.cfg.GetNodeInfo(node).drained:
1022 raise errors.OpPrereqError("Can't use drained node %s" % node,
1026 def _CheckNodeVmCapable(lu, node):
1027 """Ensure that a given node is vm capable.
1029 @param lu: the LU on behalf of which we make the check
1030 @param node: the node to check
1031 @raise errors.OpPrereqError: if the node is not vm capable
1034 if not lu.cfg.GetNodeInfo(node).vm_capable:
1035 raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
1039 def _CheckNodeHasOS(lu, node, os_name, force_variant):
1040 """Ensure that a node supports a given OS.
1042 @param lu: the LU on behalf of which we make the check
1043 @param node: the node to check
1044 @param os_name: the OS to query about
1045 @param force_variant: whether to ignore variant errors
1046 @raise errors.OpPrereqError: if the node does not support the OS
1049 result = lu.rpc.call_os_get(node, os_name)
1050 result.Raise("OS '%s' not in supported OS list for node %s" %
1052 prereq=True, ecode=errors.ECODE_INVAL)
1053 if not force_variant:
1054 _CheckOSVariant(result.payload, os_name)
1057 def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
1058 """Ensure that a node has the given secondary ip.
1060 @type lu: L{LogicalUnit}
1061 @param lu: the LU on behalf of which we make the check
1063 @param node: the node to check
1064 @type secondary_ip: string
1065 @param secondary_ip: the ip to check
1066 @type prereq: boolean
1067 @param prereq: whether to throw a prerequisite or an execute error
1068 @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
1069 @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
1072 result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
1073 result.Raise("Failure checking secondary ip on node %s" % node,
1074 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1075 if not result.payload:
1076 msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
1077 " please fix and re-run this command" % secondary_ip)
1079 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
1081 raise errors.OpExecError(msg)
1084 def _GetClusterDomainSecret():
1085 """Reads the cluster domain secret.
1088 return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
1092 def _CheckInstanceState(lu, instance, req_states, msg=None):
1093 """Ensure that an instance is in one of the required states.
1095 @param lu: the LU on behalf of which we make the check
1096 @param instance: the instance to check
1097 @param msg: if passed, should be a message to replace the default one
1098 @raise errors.OpPrereqError: if the instance is not in the required state
1102 msg = "can't use instance from outside %s states" % ", ".join(req_states)
1103 if instance.admin_state not in req_states:
1104 raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
1105 (instance.name, instance.admin_state, msg),
1108 if constants.ADMINST_UP not in req_states:
1109 pnode = instance.primary_node
1110 ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
1111 ins_l.Raise("Can't contact node %s for instance information" % pnode,
1112 prereq=True, ecode=errors.ECODE_ENVIRON)
1114 if instance.name in ins_l.payload:
1115 raise errors.OpPrereqError("Instance %s is running, %s" %
1116 (instance.name, msg), errors.ECODE_STATE)
1119 def _ComputeMinMaxSpec(name, ipolicy, value):
1120 """Computes if value is in the desired range.
1122 @param name: name of the parameter for which we perform the check
1123 @param ipolicy: dictionary containing min, max and std values
1124 @param value: actual value that we want to use
1125 @return: None or element not meeting the criteria
1129 if value in [None, constants.VALUE_AUTO]:
1131 max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
1132 min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
1133 if value > max_v or min_v > value:
1134 return ("%s value %s is not in range [%s, %s]" %
1135 (name, value, min_v, max_v))
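# Worked example (hypothetical ipolicy): with memory bounds of 128 and 4096
# in ISPECS_MIN/ISPECS_MAX, _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE,
# ipolicy, 8192) returns the "not in range [128, 4096]" message, while 512
# (or constants.VALUE_AUTO) yields no violation.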
1139 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
1140 nic_count, disk_sizes, spindle_use,
1141 _compute_fn=_ComputeMinMaxSpec):
1142 """Verifies ipolicy against provided specs.
1145 @param ipolicy: The ipolicy
1147 @param mem_size: The memory size
1148 @type cpu_count: int
1149 @param cpu_count: Used cpu cores
1150 @type disk_count: int
1151 @param disk_count: Number of disks used
1152 @type nic_count: int
1153 @param nic_count: Number of nics used
1154 @type disk_sizes: list of ints
1155 @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
1156 @type spindle_use: int
1157 @param spindle_use: The number of spindles this instance uses
1158 @param _compute_fn: The compute function (unittest only)
1159 @return: A list of violations, or an empty list if no violations are found
1162 assert disk_count == len(disk_sizes)
1165 (constants.ISPEC_MEM_SIZE, mem_size),
1166 (constants.ISPEC_CPU_COUNT, cpu_count),
1167 (constants.ISPEC_DISK_COUNT, disk_count),
1168 (constants.ISPEC_NIC_COUNT, nic_count),
1169 (constants.ISPEC_SPINDLE_USE, spindle_use),
1170 ] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes)
1173 (_compute_fn(name, ipolicy, value)
1174 for (name, value) in test_settings))
1177 def _ComputeIPolicyInstanceViolation(ipolicy, instance,
1178 _compute_fn=_ComputeIPolicySpecViolation):
1179 """Compute if instance meets the specs of ipolicy.
1182 @param ipolicy: The ipolicy to verify against
1183 @type instance: L{objects.Instance}
1184 @param instance: The instance to verify
1185 @param _compute_fn: The function to verify ipolicy (unittest only)
1186 @see: L{_ComputeIPolicySpecViolation}
1189 mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
1190 cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
1191 spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
1192 disk_count = len(instance.disks)
1193 disk_sizes = [disk.size for disk in instance.disks]
1194 nic_count = len(instance.nics)
1196 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1197 disk_sizes, spindle_use)
1200 def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
1201 _compute_fn=_ComputeIPolicySpecViolation):
1202 """Compute if instance specs meets the specs of ipolicy.
1205 @param ipolicy: The ipolicy to verify against
1206 @type instance_spec: dict
1207 @param instance_spec: The instance spec to verify
1208 @param _compute_fn: The function to verify ipolicy (unittest only)
1209 @see: L{_ComputeIPolicySpecViolation}
1212 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
1213 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
1214 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
1215 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
1216 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
1217 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
1219 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
1220 disk_sizes, spindle_use)
1223 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
1225 _compute_fn=_ComputeIPolicyInstanceViolation):
1226 """Compute if instance meets the specs of the new target group.
1228 @param ipolicy: The ipolicy to verify
1229 @param instance: The instance object to verify
1230 @param current_group: The current group of the instance
1231 @param target_group: The new group of the instance
1232 @param _compute_fn: The function to verify ipolicy (unittest only)
1233 @see: L{_ComputeIPolicySpecViolation}
1236 if current_group == target_group:
1239 return _compute_fn(ipolicy, instance)
1242 def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
1243 _compute_fn=_ComputeIPolicyNodeViolation):
1244 """Checks that the target node is correct in terms of instance policy.
1246 @param ipolicy: The ipolicy to verify
1247 @param instance: The instance object to verify
1248 @param node: The new node to relocate
1249 @param ignore: Ignore violations of the ipolicy
1250 @param _compute_fn: The function to verify ipolicy (unittest only)
1251 @see: L{_ComputeIPolicySpecViolation}
1254 primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
1255 res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
1258 msg = ("Instance does not meet target node group's (%s) instance"
1259 " policy: %s") % (node.group, utils.CommaJoin(res))
1263 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1266 def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
1267 """Computes a set of any instances that would violate the new ipolicy.
1269 @param old_ipolicy: The current (still in-place) ipolicy
1270 @param new_ipolicy: The new (to become) ipolicy
1271 @param instances: List of instances to verify
1272 @return: A list of instances which violate the new ipolicy but did not before
1275 return (_ComputeViolatingInstances(old_ipolicy, instances) -
1276 _ComputeViolatingInstances(new_ipolicy, instances))
1279 def _ExpandItemName(fn, name, kind):
1280 """Expand an item name.
1282 @param fn: the function to use for expansion
1283 @param name: requested item name
1284 @param kind: text description ('Node' or 'Instance')
1285 @return: the resolved (full) name
1286 @raise errors.OpPrereqError: if the item is not found
1289 full_name = fn(name)
1290 if full_name is None:
1291 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
1296 def _ExpandNodeName(cfg, name):
1297 """Wrapper over L{_ExpandItemName} for nodes."""
1298 return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
1301 def _ExpandInstanceName(cfg, name):
1302 """Wrapper over L{_ExpandItemName} for instance."""
1303 return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
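# Example (hypothetical name): _ExpandNodeName(cfg, "node1") returns the
# fully-qualified name known to the configuration, e.g. "node1.example.com",
# and raises errors.OpPrereqError if the short name cannot be resolved.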
1306 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
1307 minmem, maxmem, vcpus, nics, disk_template, disks,
1308 bep, hvp, hypervisor_name, tags):
1309 """Builds instance related env variables for hooks
1311 This builds the hook environment from individual variables.
1314 @param name: the name of the instance
1315 @type primary_node: string
1316 @param primary_node: the name of the instance's primary node
1317 @type secondary_nodes: list
1318 @param secondary_nodes: list of secondary nodes as strings
1319 @type os_type: string
1320 @param os_type: the name of the instance's OS
1321 @type status: string
1322 @param status: the desired status of the instance
1323 @type minmem: string
1324 @param minmem: the minimum memory size of the instance
1325 @type maxmem: string
1326 @param maxmem: the maximum memory size of the instance
1328 @param vcpus: the count of VCPUs the instance has
1330 @param nics: list of tuples (ip, mac, mode, link) representing
1331 the NICs the instance has
1332 @type disk_template: string
1333 @param disk_template: the disk template of the instance
1335 @param disks: the list of (size, mode) pairs
1337 @param bep: the backend parameters for the instance
1339 @param hvp: the hypervisor parameters for the instance
1340 @type hypervisor_name: string
1341 @param hypervisor_name: the hypervisor for the instance
1343 @param tags: list of instance tags as strings
1345 @return: the hook environment for this instance
1350 "INSTANCE_NAME": name,
1351 "INSTANCE_PRIMARY": primary_node,
1352 "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
1353 "INSTANCE_OS_TYPE": os_type,
1354 "INSTANCE_STATUS": status,
1355 "INSTANCE_MINMEM": minmem,
1356 "INSTANCE_MAXMEM": maxmem,
1357 # TODO(2.7) remove deprecated "memory" value
1358 "INSTANCE_MEMORY": maxmem,
1359 "INSTANCE_VCPUS": vcpus,
1360 "INSTANCE_DISK_TEMPLATE": disk_template,
1361 "INSTANCE_HYPERVISOR": hypervisor_name,
1364 nic_count = len(nics)
1365 for idx, (ip, mac, mode, link) in enumerate(nics):
1368 env["INSTANCE_NIC%d_IP" % idx] = ip
1369 env["INSTANCE_NIC%d_MAC" % idx] = mac
1370 env["INSTANCE_NIC%d_MODE" % idx] = mode
1371 env["INSTANCE_NIC%d_LINK" % idx] = link
1372 if mode == constants.NIC_MODE_BRIDGED:
1373 env["INSTANCE_NIC%d_BRIDGE" % idx] = link
1377 env["INSTANCE_NIC_COUNT"] = nic_count
1380 disk_count = len(disks)
1381 for idx, (size, mode) in enumerate(disks):
1382 env["INSTANCE_DISK%d_SIZE" % idx] = size
1383 env["INSTANCE_DISK%d_MODE" % idx] = mode
1387 env["INSTANCE_DISK_COUNT"] = disk_count
1392 env["INSTANCE_TAGS"] = " ".join(tags)
1394 for source, kind in [(bep, "BE"), (hvp, "HV")]:
1395 for key, value in source.items():
1396 env["INSTANCE_%s_%s" % (kind, key)] = value
1401 def _NICListToTuple(lu, nics):
1402 """Build a list of nic information tuples.
1404 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
1405 value in LUInstanceQueryData.
1407 @type lu: L{LogicalUnit}
1408 @param lu: the logical unit on whose behalf we execute
1409 @type nics: list of L{objects.NIC}
1410 @param nics: list of nics to convert to hooks tuples
1414 cluster = lu.cfg.GetClusterInfo()
1418 filled_params = cluster.SimpleFillNIC(nic.nicparams)
1419 mode = filled_params[constants.NIC_MODE]
1420 link = filled_params[constants.NIC_LINK]
1421 hooks_nics.append((ip, mac, mode, link))
1425 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
1426 """Builds instance related env variables for hooks from an object.
1428 @type lu: L{LogicalUnit}
1429 @param lu: the logical unit on whose behalf we execute
1430 @type instance: L{objects.Instance}
1431 @param instance: the instance for which we should build the
1433 @type override: dict
1434 @param override: dictionary with key/values that will override
1437 @return: the hook environment dictionary
1440 cluster = lu.cfg.GetClusterInfo()
1441 bep = cluster.FillBE(instance)
1442 hvp = cluster.FillHV(instance)
1444 "name": instance.name,
1445 "primary_node": instance.primary_node,
1446 "secondary_nodes": instance.secondary_nodes,
1447 "os_type": instance.os,
1448 "status": instance.admin_state,
1449 "maxmem": bep[constants.BE_MAXMEM],
1450 "minmem": bep[constants.BE_MINMEM],
1451 "vcpus": bep[constants.BE_VCPUS],
1452 "nics": _NICListToTuple(lu, instance.nics),
1453 "disk_template": instance.disk_template,
1454 "disks": [(disk.size, disk.mode) for disk in instance.disks],
1457 "hypervisor_name": instance.hypervisor,
1458 "tags": instance.tags,
1461 args.update(override)
1462 return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
1465 def _AdjustCandidatePool(lu, exceptions):
1466 """Adjust the candidate pool after node operations.
1469 mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1471 lu.LogInfo("Promoted nodes to master candidate role: %s",
1472 utils.CommaJoin(node.name for node in mod_list))
1473 for name in mod_list:
1474 lu.context.ReaddNode(name)
1475 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1477 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1481 def _DecideSelfPromotion(lu, exceptions=None):
1482 """Decide whether I should promote myself as a master candidate.
1485 cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1486 mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1487 # the new node will increase mc_max by one, so:
1488 mc_should = min(mc_should + 1, cp_size)
1489 return mc_now < mc_should
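# Example of the arithmetic: with candidate_pool_size=5 and
# GetMasterCandidateStats() reporting mc_now=3, mc_should=3, the new node
# raises the target to min(3 + 1, 5) = 4, so 3 < 4 and we promote ourselves.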
1492 def _CalculateGroupIPolicy(cluster, group):
1493 """Calculate instance policy for group.
1496 return cluster.SimpleFillIPolicy(group.ipolicy)
1499 def _ComputeViolatingInstances(ipolicy, instances):
1500 """Computes a set of instances who violates given ipolicy.
1502 @param ipolicy: The ipolicy to verify
1503 @type instances: object.Instance
1504 @param instances: List of instances to verify
1505 @return: A frozenset of instance names violating the ipolicy
1508 return frozenset([inst.name for inst in instances
1509 if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
1512 def _CheckNicsBridgesExist(lu, target_nics, target_node):
1513 """Check that the brigdes needed by a list of nics exist.
1516 cluster = lu.cfg.GetClusterInfo()
1517 paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1518 brlist = [params[constants.NIC_LINK] for params in paramslist
1519 if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1521 result = lu.rpc.call_bridges_exist(target_node, brlist)
1522 result.Raise("Error checking bridges on destination node '%s'" %
1523 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1526 def _CheckInstanceBridgesExist(lu, instance, node=None):
1527 """Check that the brigdes needed by an instance exist.
1531 node = instance.primary_node
1532 _CheckNicsBridgesExist(lu, instance.nics, node)
1535 def _CheckOSVariant(os_obj, name):
1536 """Check whether an OS name conforms to the os variants specification.
1538 @type os_obj: L{objects.OS}
1539 @param os_obj: OS object to check
1541 @param name: OS name passed by the user, to check for validity
1544 variant = objects.OS.GetVariant(name)
1545 if not os_obj.supported_variants:
1547 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
1548 " passed)" % (os_obj.name, variant),
1552 raise errors.OpPrereqError("OS name must include a variant",
1555 if variant not in os_obj.supported_variants:
1556 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1559 def _GetNodeInstancesInner(cfg, fn):
1560 return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1563 def _GetNodeInstances(cfg, node_name):
1564 """Returns a list of all primary and secondary instances on a node.
1568 return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1571 def _GetNodePrimaryInstances(cfg, node_name):
1572 """Returns primary instances on a node.
1575 return _GetNodeInstancesInner(cfg,
1576 lambda inst: node_name == inst.primary_node)
1579 def _GetNodeSecondaryInstances(cfg, node_name):
1580 """Returns secondary instances on a node.
1583 return _GetNodeInstancesInner(cfg,
1584 lambda inst: node_name in inst.secondary_nodes)
1587 def _GetStorageTypeArgs(cfg, storage_type):
1588 """Returns the arguments for a storage type.
1591 # Special case for file storage
1592 if storage_type == constants.ST_FILE:
1593 # storage.FileStorage wants a list of storage directories
1594 return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1599 def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
1602 for dev in instance.disks:
1603 cfg.SetDiskID(dev, node_name)
1605 result = rpc_runner.call_blockdev_getmirrorstatus(node_name, instance.disks)
1606 result.Raise("Failed to get disk status from node %s" % node_name,
1607 prereq=prereq, ecode=errors.ECODE_ENVIRON)
1609 for idx, bdev_status in enumerate(result.payload):
1610 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1616 def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1617 """Check the sanity of iallocator and node arguments and use the
1618 cluster-wide iallocator if appropriate.
1620 Check that at most one of (iallocator, node) is specified. If none is
1621 specified, then the LU's opcode's iallocator slot is filled with the
1622 cluster-wide default iallocator.
1624 @type iallocator_slot: string
1625 @param iallocator_slot: the name of the opcode iallocator slot
1626 @type node_slot: string
1627 @param node_slot: the name of the opcode target node slot
1630 node = getattr(lu.op, node_slot, None)
1631 iallocator = getattr(lu.op, iallocator_slot, None)
1633 if node is not None and iallocator is not None:
1634 raise errors.OpPrereqError("Do not specify both, iallocator and node",
1636 elif node is None and iallocator is None:
1637 default_iallocator = lu.cfg.GetDefaultIAllocator()
1638 if default_iallocator:
1639 setattr(lu.op, iallocator_slot, default_iallocator)
1641 raise errors.OpPrereqError("No iallocator or node given and no"
1642 " cluster-wide default iallocator found;"
1643 " please specify either an iallocator or a"
1644 " node, or set a cluster-wide default"
1648 def _GetDefaultIAllocator(cfg, iallocator):
1649 """Decides on which iallocator to use.
1651 @type cfg: L{config.ConfigWriter}
1652 @param cfg: Cluster configuration object
1653 @type iallocator: string or None
1654 @param iallocator: Iallocator specified in opcode
1656 @return: Iallocator name
1660 # Use default iallocator
1661 iallocator = cfg.GetDefaultIAllocator()
1664 raise errors.OpPrereqError("No iallocator was specified, neither in the"
1665 " opcode nor as a cluster-wide default",
1671 class LUClusterPostInit(LogicalUnit):
1672 """Logical unit for running hooks after cluster initialization.
1675 HPATH = "cluster-init"
1676 HTYPE = constants.HTYPE_CLUSTER
1678 def BuildHooksEnv(self):
1683 "OP_TARGET": self.cfg.GetClusterName(),
1686 def BuildHooksNodes(self):
1687 """Build hooks nodes.
1690 return ([], [self.cfg.GetMasterNode()])
1692 def Exec(self, feedback_fn):
1699 class LUClusterDestroy(LogicalUnit):
1700 """Logical unit for destroying the cluster.
1703 HPATH = "cluster-destroy"
1704 HTYPE = constants.HTYPE_CLUSTER
1706 def BuildHooksEnv(self):
1711 "OP_TARGET": self.cfg.GetClusterName(),
1714 def BuildHooksNodes(self):
1715 """Build hooks nodes.
1720 def CheckPrereq(self):
1721 """Check prerequisites.
1723 This checks whether the cluster is empty.
1725 Any errors are signaled by raising errors.OpPrereqError.
1728 master = self.cfg.GetMasterNode()
1730 nodelist = self.cfg.GetNodeList()
1731 if len(nodelist) != 1 or nodelist[0] != master:
1732 raise errors.OpPrereqError("There are still %d node(s) in"
1733 " this cluster." % (len(nodelist) - 1),
1735 instancelist = self.cfg.GetInstanceList()
1737 raise errors.OpPrereqError("There are still %d instance(s) in"
1738 " this cluster." % len(instancelist),
1741 def Exec(self, feedback_fn):
1742 """Destroys the cluster.
1745 master_params = self.cfg.GetMasterNetworkParameters()
1747 # Run post hooks on master node before it's removed
1748 _RunPostHook(self, master_params.name)
1750 ems = self.cfg.GetUseExternalMipScript()
1751 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
1754 self.LogWarning("Error disabling the master IP address: %s",
1757 return master_params.name
1760 def _VerifyCertificate(filename):
1761 """Verifies a certificate for L{LUClusterVerifyConfig}.
1763 @type filename: string
1764 @param filename: Path to PEM file
1768 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1769 utils.ReadFile(filename))
1770 except Exception, err: # pylint: disable=W0703
1771 return (LUClusterVerifyConfig.ETYPE_ERROR,
1772 "Failed to load X509 certificate %s: %s" % (filename, err))
1775 utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1776 constants.SSL_CERT_EXPIRATION_ERROR)
1779 fnamemsg = "While verifying %s: %s" % (filename, msg)
1784 return (None, fnamemsg)
1785 elif errcode == utils.CERT_WARNING:
1786 return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
1787 elif errcode == utils.CERT_ERROR:
1788 return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)
1790 raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1793 def _GetAllHypervisorParameters(cluster, instances):
1794 """Compute the set of all hypervisor parameters.
1796 @type cluster: L{objects.Cluster}
1797 @param cluster: the cluster object
1798 @type instances: list of L{objects.Instance}
1799 @param instances: additional instances from which to obtain parameters
1800 @rtype: list of (origin, hypervisor, parameters)
1801 @return: a list with all parameters found, indicating the hypervisor they
1802 apply to, and the origin (can be "cluster", "os X", or "instance Y")
1807 for hv_name in cluster.enabled_hypervisors:
1808 hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
1810 for os_name, os_hvp in cluster.os_hvp.items():
1811 for hv_name, hv_params in os_hvp.items():
1813 full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
1814 hvp_data.append(("os %s" % os_name, hv_name, full_params))
1816 # TODO: collapse identical parameter values in a single one
1817 for instance in instances:
1818 if instance.hvparams:
1819 hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
1820 cluster.FillHV(instance)))
1825 class _VerifyErrors(object):
1826 """Mix-in for cluster/group verify LUs.
1828 It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
1829 self.op and self._feedback_fn to be available.)
1833 ETYPE_FIELD = "code"
1834 ETYPE_ERROR = "ERROR"
1835 ETYPE_WARNING = "WARNING"
1837 def _Error(self, ecode, item, msg, *args, **kwargs):
1838 """Format an error message.
1840 Based on the opcode's error_codes parameter, either format a
1841 parseable error code, or a simpler error string.
1843 This must be called only from Exec and functions called from Exec.
1846 ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1847 itype, etxt, _ = ecode
1848 # first complete the msg
1851 # then format the whole message
1852 if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
1853 msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1859 msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1860 # and finally report it via the feedback_fn
1861 self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101
1863 def _ErrorIf(self, cond, ecode, *args, **kwargs):
1864 """Log an error message if the passed condition is True.
1868 or self.op.debug_simulate_errors) # pylint: disable=E1101
1870 # If the error code is in the list of ignored errors, demote the error to a warning
1872 (_, etxt, _) = ecode
1873 if etxt in self.op.ignore_errors: # pylint: disable=E1101
1874 kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING
1877 self._Error(ecode, *args, **kwargs)
1879 # do not mark the operation as failed for WARN cases only
1880 if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1881 self.bad = self.bad or cond
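# Example: a call such as self._ErrorIf(True, constants.CV_ECLUSTERCFG, None,
# "invalid entry") is rendered either in the colon-separated parseable form
# (level:code:item-type:item:message) or as a plain "ERROR: ..." line,
# depending on op.error_codes, and sets self.bad unless demoted to a WARNING.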
1884 class LUClusterVerify(NoHooksLU):
1885 """Submits all jobs necessary to verify the cluster.
1890 def ExpandNames(self):
1891 self.needed_locks = {}
1893 def Exec(self, feedback_fn):
1896 if self.op.group_name:
1897 groups = [self.op.group_name]
1898 depends_fn = lambda: None
1900 groups = self.cfg.GetNodeGroupList()
1902 # Verify global configuration
1904 opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors)
1907 # Always depend on global verification
1908 depends_fn = lambda: [(-len(jobs), [])]
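# (-len(jobs), []) is a relative job dependency: it always points back at
# the configuration-verify job appended above, even as further group jobs
# are added, so every per-group verification waits for the global check.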
1910 jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
1911 ignore_errors=self.op.ignore_errors,
1912 depends=depends_fn())]
1913 for group in groups)
1915 # Fix up all parameters
1916 for op in itertools.chain(*jobs): # pylint: disable=W0142
1917 op.debug_simulate_errors = self.op.debug_simulate_errors
1918 op.verbose = self.op.verbose
1919 op.error_codes = self.op.error_codes
1921 op.skip_checks = self.op.skip_checks
1922 except AttributeError:
1923 assert not isinstance(op, opcodes.OpClusterVerifyGroup)
1925 return ResultWithJobs(jobs)
1928 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
1929 """Verifies the cluster config.
1934 def _VerifyHVP(self, hvp_data):
1935 """Verifies locally the syntax of the hypervisor parameters.
1938 for item, hv_name, hv_params in hvp_data:
1939 msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
1942 hv_class = hypervisor.GetHypervisor(hv_name)
1943 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1944 hv_class.CheckParameterSyntax(hv_params)
1945 except errors.GenericError, err:
1946 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
1948 def ExpandNames(self):
1949 self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
1950 self.share_locks = _ShareAll()
1952 def CheckPrereq(self):
1953 """Check prerequisites.
1956 # Retrieve all information
1957 self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
1958 self.all_node_info = self.cfg.GetAllNodesInfo()
1959 self.all_inst_info = self.cfg.GetAllInstancesInfo()
1961 def Exec(self, feedback_fn):
1962 """Verify integrity of cluster, performing various test on nodes.
1966 self._feedback_fn = feedback_fn
1968 feedback_fn("* Verifying cluster config")
1970 for msg in self.cfg.VerifyConfig():
1971 self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
1973 feedback_fn("* Verifying cluster certificate files")
1975 for cert_filename in constants.ALL_CERT_FILES:
1976 (errcode, msg) = _VerifyCertificate(cert_filename)
1977 self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
1979 feedback_fn("* Verifying hypervisor parameters")
1981 self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
1982 self.all_inst_info.values()))
1984 feedback_fn("* Verifying all nodes belong to an existing group")
1986 # We do this verification here because, should this bogus circumstance
1987 # occur, it would never be caught by VerifyGroup, which only acts on
1988 # nodes/instances reachable from existing node groups.
1990 dangling_nodes = set(node.name for node in self.all_node_info.values()
1991 if node.group not in self.all_group_info)
1993 dangling_instances = {}
1994 no_node_instances = []
1996 for inst in self.all_inst_info.values():
1997 if inst.primary_node in dangling_nodes:
1998 dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
1999 elif inst.primary_node not in self.all_node_info:
2000 no_node_instances.append(inst.name)
2005 utils.CommaJoin(dangling_instances.get(node.name,
2007 for node in dangling_nodes]
2009 self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
2011 "the following nodes (and their instances) belong to a non"
2012 " existing group: %s", utils.CommaJoin(pretty_dangling))
2014 self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
2016 "the following instances have a non-existing primary-node:"
2017 " %s", utils.CommaJoin(no_node_instances))
2022 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
2023 """Verifies the status of a node group.
2026 HPATH = "cluster-verify"
2027 HTYPE = constants.HTYPE_CLUSTER
2030 _HOOKS_INDENT_RE = re.compile("^", re.M)
2032 class NodeImage(object):
2033 """A class representing the logical and physical status of a node.
2036 @ivar name: the node name to which this object refers
2037 @ivar volumes: a structure as returned from
2038 L{ganeti.backend.GetVolumeList} (runtime)
2039 @ivar instances: a list of running instances (runtime)
2040 @ivar pinst: list of configured primary instances (config)
2041 @ivar sinst: list of configured secondary instances (config)
2042 @ivar sbp: dictionary of {primary-node: list of instances} for all
2043 instances for which this node is secondary (config)
2044 @ivar mfree: free memory, as reported by hypervisor (runtime)
2045 @ivar dfree: free disk, as reported by the node (runtime)
2046 @ivar offline: the offline status (config)
2047 @type rpc_fail: boolean
2048 @ivar rpc_fail: whether the RPC verify call was successful (overall,
2049 not whether the individual keys were correct) (runtime)
2050 @type lvm_fail: boolean
2051 @ivar lvm_fail: whether the RPC call didn't return valid LVM data
2052 @type hyp_fail: boolean
2053 @ivar hyp_fail: whether the RPC call didn't return the instance list
2054 @type ghost: boolean
2055 @ivar ghost: whether this is a known node or not (config)
2056 @type os_fail: boolean
2057 @ivar os_fail: whether the RPC call didn't return valid OS data
2059 @ivar oslist: list of OSes as diagnosed by DiagnoseOS
2060 @type vm_capable: boolean
2061 @ivar vm_capable: whether the node can host instances
2064 def __init__(self, offline=False, name=None, vm_capable=True):
2073 self.offline = offline
2074 self.vm_capable = vm_capable
2075 self.rpc_fail = False
2076 self.lvm_fail = False
2077 self.hyp_fail = False
2079 self.os_fail = False
2082 def ExpandNames(self):
2083 # This raises errors.OpPrereqError on its own:
2084 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
2086 # Get instances in node group; this is unsafe and needs verification later
2088 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2090 self.needed_locks = {
2091 locking.LEVEL_INSTANCE: inst_names,
2092 locking.LEVEL_NODEGROUP: [self.group_uuid],
2093 locking.LEVEL_NODE: [],
2096 self.share_locks = _ShareAll()
2098 def DeclareLocks(self, level):
2099 if level == locking.LEVEL_NODE:
2100 # Get members of node group; this is unsafe and needs verification later
2101 nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
2103 all_inst_info = self.cfg.GetAllInstancesInfo()
2105 # In Exec(), we warn about mirrored instances that have primary and
2106 # secondary living in separate node groups. To fully verify that
2107 # volumes for these instances are healthy, we will need to do an
2108 # extra call to their secondaries. We ensure here those nodes will
2110 for inst in self.owned_locks(locking.LEVEL_INSTANCE):
2111 # Important: access only the instances whose lock is owned
2112 if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
2113 nodes.update(all_inst_info[inst].secondary_nodes)
2115 self.needed_locks[locking.LEVEL_NODE] = nodes
2117 def CheckPrereq(self):
2118 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
2119 self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
2121 group_nodes = set(self.group_info.members)
2123 self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
2126 group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2128 unlocked_instances = \
2129 group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
2132 raise errors.OpPrereqError("Missing lock for nodes: %s" %
2133 utils.CommaJoin(unlocked_nodes),
2136 if unlocked_instances:
2137 raise errors.OpPrereqError("Missing lock for instances: %s" %
2138 utils.CommaJoin(unlocked_instances),
2141 self.all_node_info = self.cfg.GetAllNodesInfo()
2142 self.all_inst_info = self.cfg.GetAllInstancesInfo()
2144 self.my_node_names = utils.NiceSort(group_nodes)
2145 self.my_inst_names = utils.NiceSort(group_instances)
2147 self.my_node_info = dict((name, self.all_node_info[name])
2148 for name in self.my_node_names)
2150 self.my_inst_info = dict((name, self.all_inst_info[name])
2151 for name in self.my_inst_names)
2153 # We detect here the nodes that will need the extra RPC calls for verifying
2154 # split LV volumes; they should be locked.
2155 extra_lv_nodes = set()
2157 for inst in self.my_inst_info.values():
2158 if inst.disk_template in constants.DTS_INT_MIRROR:
2159 for nname in inst.all_nodes:
2160 if self.all_node_info[nname].group != self.group_uuid:
2161 extra_lv_nodes.add(nname)
2163 unlocked_lv_nodes = \
2164 extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
2166 if unlocked_lv_nodes:
2167 raise errors.OpPrereqError("Missing node locks for LV check: %s" %
2168 utils.CommaJoin(unlocked_lv_nodes),
2170 self.extra_lv_nodes = list(extra_lv_nodes)
2172 def _VerifyNode(self, ninfo, nresult):
2173 """Perform some basic validation on data returned from a node.
2175 - check the result data structure is well formed and has all the
2177 - check ganeti version
2179 @type ninfo: L{objects.Node}
2180 @param ninfo: the node to check
2181 @param nresult: the results from the node
2183 @return: whether overall this call was successful (and we can expect
2184 reasonable values in the response)
2188 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2190 # main result, nresult should be a non-empty dict
2191 test = not nresult or not isinstance(nresult, dict)
2192 _ErrorIf(test, constants.CV_ENODERPC, node,
2193 "unable to verify node: no data returned")
2197 # compares ganeti version
2198 local_version = constants.PROTOCOL_VERSION
2199 remote_version = nresult.get("version", None)
2200 test = not (remote_version and
2201 isinstance(remote_version, (list, tuple)) and
2202 len(remote_version) == 2)
2203 _ErrorIf(test, constants.CV_ENODERPC, node,
2204 "connection to node returned invalid data")
2208 test = local_version != remote_version[0]
2209 _ErrorIf(test, constants.CV_ENODEVERSION, node,
2210 "incompatible protocol versions: master %s,"
2211 " node %s", local_version, remote_version[0])
2215 # node seems compatible, we can actually try to look into its results
2217 # full package version
2218 self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
2219 constants.CV_ENODEVERSION, node,
2220 "software version mismatch: master %s, node %s",
2221 constants.RELEASE_VERSION, remote_version[1],
2222 code=self.ETYPE_WARNING)
2224 hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
2225 if ninfo.vm_capable and isinstance(hyp_result, dict):
2226 for hv_name, hv_result in hyp_result.iteritems():
2227 test = hv_result is not None
2228 _ErrorIf(test, constants.CV_ENODEHV, node,
2229 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
2231 hvp_result = nresult.get(constants.NV_HVPARAMS, None)
2232 if ninfo.vm_capable and isinstance(hvp_result, list):
2233 for item, hv_name, hv_result in hvp_result:
2234 _ErrorIf(True, constants.CV_ENODEHV, node,
2235 "hypervisor %s parameter verify failure (source %s): %s",
2236 hv_name, item, hv_result)
2238 test = nresult.get(constants.NV_NODESETUP,
2239 ["Missing NODESETUP results"])
2240 _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
2245 def _VerifyNodeTime(self, ninfo, nresult,
2246 nvinfo_starttime, nvinfo_endtime):
2247 """Check the node time.
2249 @type ninfo: L{objects.Node}
2250 @param ninfo: the node to check
2251 @param nresult: the remote results for the node
2252 @param nvinfo_starttime: the start time of the RPC call
2253 @param nvinfo_endtime: the end time of the RPC call
2257 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2259 ntime = nresult.get(constants.NV_TIME, None)
2261 ntime_merged = utils.MergeTime(ntime)
2262 except (ValueError, TypeError):
2263 _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
2266 if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
2267 ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
2268 elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
2269 ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
2273 _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
2274 "Node time diverges by at least %s from master node time",
2277 def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
2278 """Check the node LVM results.
2280 @type ninfo: L{objects.Node}
2281 @param ninfo: the node to check
2282 @param nresult: the remote results for the node
2283 @param vg_name: the configured VG name
2290 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2292 # checks vg existence and size > 20G
2293 vglist = nresult.get(constants.NV_VGLIST, None)
2295 _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
2297 vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
2298 constants.MIN_VG_SIZE)
2299 _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
2302 pvlist = nresult.get(constants.NV_PVLIST, None)
2303 test = pvlist is None
2304 _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
2306 # check that ':' is not present in PV names, since it's a
2307 # special character for lvcreate (denotes the range of PEs to
2309 for _, pvname, owner_vg in pvlist:
2310 test = ":" in pvname
2311 _ErrorIf(test, constants.CV_ENODELVM, node,
2312 "Invalid character ':' in PV '%s' of VG '%s'",
2315 def _VerifyNodeBridges(self, ninfo, nresult, bridges):
2316 """Check the node bridges.
2318 @type ninfo: L{objects.Node}
2319 @param ninfo: the node to check
2320 @param nresult: the remote results for the node
2321 @param bridges: the expected list of bridges
2328 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2330 missing = nresult.get(constants.NV_BRIDGES, None)
2331 test = not isinstance(missing, list)
2332 _ErrorIf(test, constants.CV_ENODENET, node,
2333 "did not return valid bridge information")
2335 _ErrorIf(bool(missing), constants.CV_ENODENET, node,
2336 "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
2338 def _VerifyNodeUserScripts(self, ninfo, nresult):
2339 """Check the results of user scripts presence and executability on the node
2341 @type ninfo: L{objects.Node}
2342 @param ninfo: the node to check
2343 @param nresult: the remote results for the node
2348 test = constants.NV_USERSCRIPTS not in nresult
2349 self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
2350 "did not return user scripts information")
2352 broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
2354 self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node,
2355 "user scripts not present or not executable: %s" %
2356 utils.CommaJoin(sorted(broken_scripts)))
2358 def _VerifyNodeNetwork(self, ninfo, nresult):
2359 """Check the node network connectivity results.
2361 @type ninfo: L{objects.Node}
2362 @param ninfo: the node to check
2363 @param nresult: the remote results for the node
2367 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2369 test = constants.NV_NODELIST not in nresult
2370 _ErrorIf(test, constants.CV_ENODESSH, node,
2371 "node hasn't returned node ssh connectivity data")
2373 if nresult[constants.NV_NODELIST]:
2374 for a_node, a_msg in nresult[constants.NV_NODELIST].items():
2375 _ErrorIf(True, constants.CV_ENODESSH, node,
2376 "ssh communication with node '%s': %s", a_node, a_msg)
2378 test = constants.NV_NODENETTEST not in nresult
2379 _ErrorIf(test, constants.CV_ENODENET, node,
2380 "node hasn't returned node tcp connectivity data")
2382 if nresult[constants.NV_NODENETTEST]:
2383 nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
2385 _ErrorIf(True, constants.CV_ENODENET, node,
2386 "tcp communication with node '%s': %s",
2387 anode, nresult[constants.NV_NODENETTEST][anode])
2389 test = constants.NV_MASTERIP not in nresult
2390 _ErrorIf(test, constants.CV_ENODENET, node,
2391 "node hasn't returned node master IP reachability data")
2393 if not nresult[constants.NV_MASTERIP]:
2394 if node == self.master_node:
2395 msg = "the master node cannot reach the master IP (not configured?)"
2397 msg = "cannot reach the master IP"
2398 _ErrorIf(True, constants.CV_ENODENET, node, msg)
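# The checks above cover SSH connectivity to the other nodes (NV_NODELIST),
# TCP connectivity to their primary/secondary IPs (NV_NODENETTEST) and
# reachability of the master IP (NV_MASTERIP), matching the keys requested
# from the nodes in Exec() below.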
2400 def _VerifyInstance(self, instance, instanceconfig, node_image,
2402 """Verify an instance.
2404 This function checks to see if the required block devices are
2405 available on the instance's node.
2408 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2409 node_current = instanceconfig.primary_node
2411 node_vol_should = {}
2412 instanceconfig.MapLVsByNode(node_vol_should)
2414 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(), self.group_info)
2415 err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
2416 _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, err)
2418 for node in node_vol_should:
2419 n_img = node_image[node]
2420 if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2421 # ignore missing volumes on offline or broken nodes
2423 for volume in node_vol_should[node]:
2424 test = volume not in n_img.volumes
2425 _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
2426 "volume %s missing on node %s", volume, node)
2428 if instanceconfig.admin_state == constants.ADMINST_UP:
2429 pri_img = node_image[node_current]
2430 test = instance not in pri_img.instances and not pri_img.offline
2431 _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
2432 "instance not running on its primary node %s",
2435 diskdata = [(nname, success, status, idx)
2436 for (nname, disks) in diskstatus.items()
2437 for idx, (success, status) in enumerate(disks)]
2439 for nname, success, bdev_status, idx in diskdata:
2440 # the 'ghost node' construction in Exec() ensures that we have a
2442 snode = node_image[nname]
2443 bad_snode = snode.ghost or snode.offline
2444 _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
2445 not success and not bad_snode,
2446 constants.CV_EINSTANCEFAULTYDISK, instance,
2447 "couldn't retrieve status for disk/%s on %s: %s",
2448 idx, nname, bdev_status)
2449 _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
2450 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
2451 constants.CV_EINSTANCEFAULTYDISK, instance,
2452 "disk/%s on %s is faulty", idx, nname)
2454 def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2455 """Verify if there are any unknown volumes in the cluster.
2457 The .os, .swap and backup volumes are ignored. All other volumes are
2458 reported as unknown.
2460 @type reserved: L{ganeti.utils.FieldSet}
2461 @param reserved: a FieldSet of reserved volume names
2464 for node, n_img in node_image.items():
2465 if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
2466 self.all_node_info[node].group != self.group_uuid):
2467 # skip non-healthy nodes
2469 for volume in n_img.volumes:
2470 test = ((node not in node_vol_should or
2471 volume not in node_vol_should[node]) and
2472 not reserved.Matches(volume))
2473 self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
2474 "volume %s is unknown", volume)
2476 def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2477 """Verify N+1 Memory Resilience.
2479 Check that if one single node dies we can still start all the
2480 instances it was primary for.
2483 cluster_info = self.cfg.GetClusterInfo()
2484 for node, n_img in node_image.items():
2485 # This code checks that every node which is now listed as
2486 # secondary has enough memory to host all instances it is
2487 # supposed to host, should a single other node in the cluster fail.
2488 # FIXME: not ready for failover to an arbitrary node
2489 # FIXME: does not support file-backed instances
2490 # WARNING: we currently take into account down instances as well
2491 # as up ones, considering that even if they're down someone
2492 # might want to start them even in the event of a node failure.
2493 if n_img.offline or self.all_node_info[node].group != self.group_uuid:
2494 # we're skipping nodes marked offline and nodes in other groups from
2495 # the N+1 warning, since most likely we don't have good memory
2496 # information from them; we already list instances living on such
2497 # nodes, and that's enough warning
2499 #TODO(dynmem): also consider ballooning out other instances
2500 for prinode, instances in n_img.sbp.items():
2502 for instance in instances:
2503 bep = cluster_info.FillBE(instance_cfg[instance])
2504 if bep[constants.BE_AUTO_BALANCE]:
2505 needed_mem += bep[constants.BE_MINMEM]
2506 test = n_img.mfree < needed_mem
2507 self._ErrorIf(test, constants.CV_ENODEN1, node,
2508 "not enough memory to accomodate instance failovers"
2509 " should node %s fail (%dMiB needed, %dMiB available)",
2510 prinode, needed_mem, n_img.mfree)
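# Illustrative figures only: if this node is secondary for auto-balanced
# instances of one primary needing 2048 and 1024 MiB of minimum memory,
# it must report at least 3072 MiB free or CV_ENODEN1 is raised for it.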
2513 def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2514 (files_all, files_opt, files_mc, files_vm)):
2515 """Verifies file checksums collected from all nodes.
2517 @param errorif: Callback for reporting errors
2518 @param nodeinfo: List of L{objects.Node} objects
2519 @param master_node: Name of master node
2520 @param all_nvinfo: RPC results
2523 # Define functions determining which nodes to consider for a file
2526 (files_mc, lambda node: (node.master_candidate or
2527 node.name == master_node)),
2528 (files_vm, lambda node: node.vm_capable),
2531 # Build mapping from filename to list of nodes which should have the file
2533 for (files, fn) in files2nodefn:
2535 filenodes = nodeinfo
2537 filenodes = filter(fn, nodeinfo)
2538 nodefiles.update((filename,
2539 frozenset(map(operator.attrgetter("name"), filenodes)))
2540 for filename in files)
2542 assert set(nodefiles) == (files_all | files_mc | files_vm)
2544 fileinfo = dict((filename, {}) for filename in nodefiles)
2545 ignore_nodes = set()
2547 for node in nodeinfo:
2549 ignore_nodes.add(node.name)
2552 nresult = all_nvinfo[node.name]
2554 if nresult.fail_msg or not nresult.payload:
2557 node_files = nresult.payload.get(constants.NV_FILELIST, None)
2559 test = not (node_files and isinstance(node_files, dict))
2560 errorif(test, constants.CV_ENODEFILECHECK, node.name,
2561 "Node did not return file checksum data")
2563 ignore_nodes.add(node.name)
2566 # Build per-checksum mapping from filename to nodes having it
2567 for (filename, checksum) in node_files.items():
2568 assert filename in nodefiles
2569 fileinfo[filename].setdefault(checksum, set()).add(node.name)
2571 for (filename, checksums) in fileinfo.items():
2572 assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2574 # Nodes having the file
2575 with_file = frozenset(node_name
2576 for nodes in fileinfo[filename].values()
2577 for node_name in nodes) - ignore_nodes
2579 expected_nodes = nodefiles[filename] - ignore_nodes
2581 # Nodes missing file
2582 missing_file = expected_nodes - with_file
2584 if filename in files_opt:
2586 errorif(missing_file and missing_file != expected_nodes,
2587 constants.CV_ECLUSTERFILECHECK, None,
2588 "File %s is optional, but it must exist on all or no"
2589 " nodes (not found on %s)",
2590 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2592 errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
2593 "File %s is missing from node(s) %s", filename,
2594 utils.CommaJoin(utils.NiceSort(missing_file)))
2596 # Warn if a node has a file it shouldn't
2597 unexpected = with_file - expected_nodes
2599 constants.CV_ECLUSTERFILECHECK, None,
2600 "File %s should not exist on node(s) %s",
2601 filename, utils.CommaJoin(utils.NiceSort(unexpected)))
2603 # See if there are multiple versions of the file
2604 test = len(checksums) > 1
2606 variants = ["variant %s on %s" %
2607 (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2608 for (idx, (checksum, nodes)) in
2609 enumerate(sorted(checksums.items()))]
2613 errorif(test, constants.CV_ECLUSTERFILECHECK, None,
2614 "File %s found with %s different checksums (%s)",
2615 filename, len(checksums), "; ".join(variants))
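# File categories handled above: files_all must be present on every node,
# files_mc on the master and master candidates, files_vm on vm_capable
# nodes, while files_opt marks optional files that must be either present
# on all expected nodes or absent from all of them.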
2617 def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2619 """Verifies and the node DRBD status.
2621 @type ninfo: L{objects.Node}
2622 @param ninfo: the node to check
2623 @param nresult: the remote results for the node
2624 @param instanceinfo: the dict of instances
2625 @param drbd_helper: the configured DRBD usermode helper
2626 @param drbd_map: the DRBD map as returned by
2627 L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2631 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2634 helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2635 test = (helper_result is None)
2636 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2637 "no drbd usermode helper returned")
2639 status, payload = helper_result
2641 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2642 "drbd usermode helper check unsuccessful: %s", payload)
2643 test = status and (payload != drbd_helper)
2644 _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2645 "wrong drbd usermode helper: %s", payload)
2647 # compute the DRBD minors
2649 for minor, instance in drbd_map[node].items():
2650 test = instance not in instanceinfo
2651 _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2652 "ghost instance '%s' in temporary DRBD map", instance)
2653 # ghost instance should not be running, but otherwise we
2654 # don't give double warnings (both ghost instance and
2655 # unallocated minor in use)
2657 node_drbd[minor] = (instance, False)
2659 instance = instanceinfo[instance]
2660 node_drbd[minor] = (instance.name,
2661 instance.admin_state == constants.ADMINST_UP)
2663 # and now check them
2664 used_minors = nresult.get(constants.NV_DRBDLIST, [])
2665 test = not isinstance(used_minors, (tuple, list))
2666 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2667 "cannot parse drbd status file: %s", str(used_minors))
2669 # we cannot check drbd status
2672 for minor, (iname, must_exist) in node_drbd.items():
2673 test = minor not in used_minors and must_exist
2674 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2675 "drbd minor %d of instance %s is not active", minor, iname)
2676 for minor in used_minors:
2677 test = minor not in node_drbd
2678 _ErrorIf(test, constants.CV_ENODEDRBD, node,
2679 "unallocated drbd minor %d is in use", minor)
2681 def _UpdateNodeOS(self, ninfo, nresult, nimg):
2682 """Builds the node OS structures.
2684 @type ninfo: L{objects.Node}
2685 @param ninfo: the node to check
2686 @param nresult: the remote results for the node
2687 @param nimg: the node image object
2691 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2693 remote_os = nresult.get(constants.NV_OSLIST, None)
2694 test = (not isinstance(remote_os, list) or
2695 not compat.all(isinstance(v, list) and len(v) == 7
2696 for v in remote_os))
2698 _ErrorIf(test, constants.CV_ENODEOS, node,
2699 "node hasn't returned valid OS data")
2708 for (name, os_path, status, diagnose,
2709 variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2711 if name not in os_dict:
2714 # parameters is a list of lists instead of list of tuples due to
2715 # JSON lacking a real tuple type, fix it:
2716 parameters = [tuple(v) for v in parameters]
2717 os_dict[name].append((os_path, status, diagnose,
2718 set(variants), set(parameters), set(api_ver)))
2720 nimg.oslist = os_dict
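# After this call nimg.oslist maps each OS name to a list of (path, status,
# diagnose message, variants set, parameters set, api_versions set) tuples,
# one entry per location in which the OS was found on the node.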
2722 def _VerifyNodeOS(self, ninfo, nimg, base):
2723 """Verifies the node OS list.
2725 @type ninfo: L{objects.Node}
2726 @param ninfo: the node to check
2727 @param nimg: the node image object
2728 @param base: the 'template' node we match against (e.g. from the master)
2732 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2734 assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2736 beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2737 for os_name, os_data in nimg.oslist.items():
2738 assert os_data, "Empty OS status for OS %s?!" % os_name
2739 f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2740 _ErrorIf(not f_status, constants.CV_ENODEOS, node,
2741 "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2742 _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
2743 "OS '%s' has multiple entries (first one shadows the rest): %s",
2744 os_name, utils.CommaJoin([v[0] for v in os_data]))
2745 # comparisons with the 'base' image
2746 test = os_name not in base.oslist
2747 _ErrorIf(test, constants.CV_ENODEOS, node,
2748 "Extra OS %s not present on reference node (%s)",
2752 assert base.oslist[os_name], "Base node has empty OS status?"
2753 _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2755 # base OS is invalid, skipping
2757 for kind, a, b in [("API version", f_api, b_api),
2758 ("variants list", f_var, b_var),
2759 ("parameters", beautify_params(f_param),
2760 beautify_params(b_param))]:
2761 _ErrorIf(a != b, constants.CV_ENODEOS, node,
2762 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2763 kind, os_name, base.name,
2764 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2766 # check any missing OSes
2767 missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2768 _ErrorIf(missing, constants.CV_ENODEOS, node,
2769 "OSes present on reference node %s but missing on this node: %s",
2770 base.name, utils.CommaJoin(missing))
2772 def _VerifyOob(self, ninfo, nresult):
2773 """Verifies out of band functionality of a node.
2775 @type ninfo: L{objects.Node}
2776 @param ninfo: the node to check
2777 @param nresult: the remote results for the node
2781 # We just have to verify the paths on master and/or master candidates
2782 # as the oob helper is invoked on the master
2783 if ((ninfo.master_candidate or ninfo.master_capable) and
2784 constants.NV_OOB_PATHS in nresult):
2785 for path_result in nresult[constants.NV_OOB_PATHS]:
2786 self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
2788 def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2789 """Verifies and updates the node volume data.
2791 This function will update a L{NodeImage}'s internal structures
2792 with data from the remote call.
2794 @type ninfo: L{objects.Node}
2795 @param ninfo: the node to check
2796 @param nresult: the remote results for the node
2797 @param nimg: the node image object
2798 @param vg_name: the configured VG name
2802 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2804 nimg.lvm_fail = True
2805 lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2808 elif isinstance(lvdata, basestring):
2809 _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
2810 utils.SafeEncode(lvdata))
2811 elif not isinstance(lvdata, dict):
2812 _ErrorIf(True, constants.CV_ENODELVM, node,
2813 "rpc call to node failed (lvlist)")
2815 nimg.volumes = lvdata
2816 nimg.lvm_fail = False
2818 def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2819 """Verifies and updates the node instance list.
2821 If the listing was successful, then updates this node's instance
2822 list. Otherwise, it marks the RPC call as failed for the instance
2825 @type ninfo: L{objects.Node}
2826 @param ninfo: the node to check
2827 @param nresult: the remote results for the node
2828 @param nimg: the node image object
2831 idata = nresult.get(constants.NV_INSTANCELIST, None)
2832 test = not isinstance(idata, list)
2833 self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
2834 "rpc call to node failed (instancelist): %s",
2835 utils.SafeEncode(str(idata)))
2837 nimg.hyp_fail = True
2839 nimg.instances = idata
2841 def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2842 """Verifies and computes a node information map
2844 @type ninfo: L{objects.Node}
2845 @param ninfo: the node to check
2846 @param nresult: the remote results for the node
2847 @param nimg: the node image object
2848 @param vg_name: the configured VG name
2852 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2854 # try to read free memory (from the hypervisor)
2855 hv_info = nresult.get(constants.NV_HVINFO, None)
2856 test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2857 _ErrorIf(test, constants.CV_ENODEHV, node,
2858 "rpc call to node failed (hvinfo)")
2861 nimg.mfree = int(hv_info["memory_free"])
2862 except (ValueError, TypeError):
2863 _ErrorIf(True, constants.CV_ENODERPC, node,
2864 "node returned invalid nodeinfo, check hypervisor")
2866 # FIXME: devise a free space model for file based instances as well
2867 if vg_name is not None:
2868 test = (constants.NV_VGLIST not in nresult or
2869 vg_name not in nresult[constants.NV_VGLIST])
2870 _ErrorIf(test, constants.CV_ENODELVM, node,
2871 "node didn't return data for the volume group '%s'"
2872 " - it is either missing or broken", vg_name)
2875 nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2876 except (ValueError, TypeError):
2877 _ErrorIf(True, constants.CV_ENODERPC, node,
2878 "node returned invalid LVM info, check LVM status")
2880 def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2881 """Gets per-disk status information for all instances.
2883 @type nodelist: list of strings
2884 @param nodelist: Node names
2885 @type node_image: dict of (name, L{objects.Node})
2886 @param node_image: Node objects
2887 @type instanceinfo: dict of (name, L{objects.Instance})
2888 @param instanceinfo: Instance objects
2889 @rtype: {instance: {node: [(success, payload)]}}
2890 @return: a dictionary of per-instance dictionaries with nodes as
2891 keys and disk information as values; the disk information is a
2892 list of tuples (success, payload)
2895 _ErrorIf = self._ErrorIf # pylint: disable=C0103
2898 node_disks_devonly = {}
2899 diskless_instances = set()
2900 diskless = constants.DT_DISKLESS
2902 for nname in nodelist:
2903 node_instances = list(itertools.chain(node_image[nname].pinst,
2904 node_image[nname].sinst))
2905 diskless_instances.update(inst for inst in node_instances
2906 if instanceinfo[inst].disk_template == diskless)
2907 disks = [(inst, disk)
2908 for inst in node_instances
2909 for disk in instanceinfo[inst].disks]
2912 # No need to collect data
2915 node_disks[nname] = disks
2917 # Creating copies as SetDiskID below will modify the objects and that can
2918 # lead to incorrect data returned from nodes
2919 devonly = [dev.Copy() for (_, dev) in disks]
2922 self.cfg.SetDiskID(dev, nname)
2924 node_disks_devonly[nname] = devonly
2926 assert len(node_disks) == len(node_disks_devonly)
2928 # Collect data from all nodes with disks
2929 result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2932 assert len(result) == len(node_disks)
2936 for (nname, nres) in result.items():
2937 disks = node_disks[nname]
2940 # No data from this node
2941 data = len(disks) * [(False, "node offline")]
2944 _ErrorIf(msg, constants.CV_ENODERPC, nname,
2945 "while getting disk information: %s", msg)
2947 # No data from this node
2948 data = len(disks) * [(False, msg)]
2951 for idx, i in enumerate(nres.payload):
2952 if isinstance(i, (tuple, list)) and len(i) == 2:
2955 logging.warning("Invalid result from node %s, entry %d: %s",
2957 data.append((False, "Invalid result from the remote node"))
2959 for ((inst, _), status) in zip(disks, data):
2960 instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2962 # Add empty entries for diskless instances.
2963 for inst in diskless_instances:
2964 assert inst not in instdisk
2967 assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2968 len(nnames) <= len(instanceinfo[inst].all_nodes) and
2969 compat.all(isinstance(s, (tuple, list)) and
2970 len(s) == 2 for s in statuses)
2971 for inst, nnames in instdisk.items()
2972 for nname, statuses in nnames.items())
2973 assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
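# At this point instdisk has the shape documented above, e.g. (illustrative):
#   {"inst1": {"node1": [(True, <status>), (False, "node offline")]}}
# with one (success, payload) entry per disk of the instance on that node.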
2978 def _SshNodeSelector(group_uuid, all_nodes):
2979 """Create endless iterators for all potential SSH check hosts.
2982 nodes = [node for node in all_nodes
2983 if (node.group != group_uuid and
2985 keyfunc = operator.attrgetter("group")
2987 return map(itertools.cycle,
2988 [sorted(map(operator.attrgetter("name"), names))
2989 for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
2993 def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
2994 """Choose which nodes should talk to which other nodes.
2996 We will make nodes contact all nodes in their group, and one node from
2999 @warning: This algorithm has a known issue if one node group is much
3000 smaller than others (e.g. just one node). In such a case all other
3001 nodes will talk to the single node.
3004 online_nodes = sorted(node.name for node in group_nodes if not node.offline)
3005 sel = cls._SshNodeSelector(group_uuid, all_nodes)
3007 return (online_nodes,
3008 dict((name, sorted([i.next() for i in sel]))
3009 for name in online_nodes))
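# In other words, the NV_NODELIST payload makes every online node contact
# all nodes of its own group plus one node from each other group, cycling
# through the sorted member names so the inter-group checks are spread out.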
3011 def BuildHooksEnv(self):
3014 Cluster-Verify hooks are run only in the post phase; their failure causes
3015 their output to be logged in the verify output and the verification to fail.
3019 "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
3022 env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
3023 for node in self.my_node_info.values())
3027 def BuildHooksNodes(self):
3028 """Build hooks nodes.
3031 return ([], self.my_node_names)
3033 def Exec(self, feedback_fn):
3034 """Verify integrity of the node group, performing various test on nodes.
3037 # This method has too many local variables. pylint: disable=R0914
3038 feedback_fn("* Verifying group '%s'" % self.group_info.name)
3040 if not self.my_node_names:
3042 feedback_fn("* Empty node group, skipping verification")
3046 _ErrorIf = self._ErrorIf # pylint: disable=C0103
3047 verbose = self.op.verbose
3048 self._feedback_fn = feedback_fn
3050 vg_name = self.cfg.GetVGName()
3051 drbd_helper = self.cfg.GetDRBDHelper()
3052 cluster = self.cfg.GetClusterInfo()
3053 groupinfo = self.cfg.GetAllNodeGroupsInfo()
3054 hypervisors = cluster.enabled_hypervisors
3055 node_data_list = [self.my_node_info[name] for name in self.my_node_names]
3057 i_non_redundant = [] # Non redundant instances
3058 i_non_a_balanced = [] # Non auto-balanced instances
3059 i_offline = 0 # Count of offline instances
3060 n_offline = 0 # Count of offline nodes
3061 n_drained = 0 # Count of nodes being drained
3062 node_vol_should = {}
3064 # FIXME: verify OS list
3067 filemap = _ComputeAncillaryFiles(cluster, False)
3069 # do local checksums
3070 master_node = self.master_node = self.cfg.GetMasterNode()
3071 master_ip = self.cfg.GetMasterIP()
3073 feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
3076 if self.cfg.GetUseExternalMipScript():
3077 user_scripts.append(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
3079 node_verify_param = {
3080 constants.NV_FILELIST:
3081 utils.UniqueSequence(filename
3082 for files in filemap
3083 for filename in files),
3084 constants.NV_NODELIST:
3085 self._SelectSshCheckNodes(node_data_list, self.group_uuid,
3086 self.all_node_info.values()),
3087 constants.NV_HYPERVISOR: hypervisors,
3088 constants.NV_HVPARAMS:
3089 _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
3090 constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
3091 for node in node_data_list
3092 if not node.offline],
3093 constants.NV_INSTANCELIST: hypervisors,
3094 constants.NV_VERSION: None,
3095 constants.NV_HVINFO: self.cfg.GetHypervisorType(),
3096 constants.NV_NODESETUP: None,
3097 constants.NV_TIME: None,
3098 constants.NV_MASTERIP: (master_node, master_ip),
3099 constants.NV_OSLIST: None,
3100 constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
3101 constants.NV_USERSCRIPTS: user_scripts,
3104 if vg_name is not None:
3105 node_verify_param[constants.NV_VGLIST] = None
3106 node_verify_param[constants.NV_LVLIST] = vg_name
3107 node_verify_param[constants.NV_PVLIST] = [vg_name]
3108 node_verify_param[constants.NV_DRBDLIST] = None
3111 node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
3114 # FIXME: this needs to be changed per node-group, not cluster-wide
3116 default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
3117 if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3118 bridges.add(default_nicpp[constants.NIC_LINK])
3119 for instance in self.my_inst_info.values():
3120 for nic in instance.nics:
3121 full_nic = cluster.SimpleFillNIC(nic.nicparams)
3122 if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3123 bridges.add(full_nic[constants.NIC_LINK])
3126 node_verify_param[constants.NV_BRIDGES] = list(bridges)
3128 # Build our expected cluster state
3129 node_image = dict((node.name, self.NodeImage(offline=node.offline,
3131 vm_capable=node.vm_capable))
3132 for node in node_data_list)
3136 for node in self.all_node_info.values():
3137 path = _SupportsOob(self.cfg, node)
3138 if path and path not in oob_paths:
3139 oob_paths.append(path)
3142 node_verify_param[constants.NV_OOB_PATHS] = oob_paths
3144 for instance in self.my_inst_names:
3145 inst_config = self.my_inst_info[instance]
3147 for nname in inst_config.all_nodes:
3148 if nname not in node_image:
3149 gnode = self.NodeImage(name=nname)
3150 gnode.ghost = (nname not in self.all_node_info)
3151 node_image[nname] = gnode
3153 inst_config.MapLVsByNode(node_vol_should)
3155 pnode = inst_config.primary_node
3156 node_image[pnode].pinst.append(instance)
3158 for snode in inst_config.secondary_nodes:
3159 nimg = node_image[snode]
3160 nimg.sinst.append(instance)
3161 if pnode not in nimg.sbp:
3162 nimg.sbp[pnode] = []
3163 nimg.sbp[pnode].append(instance)
3165 # At this point, we have the in-memory data structures complete,
3166 # except for the runtime information, which we'll gather next
3168 # Due to the way our RPC system works, exact response times cannot be
3169 # guaranteed (e.g. a broken node could run into a timeout). By keeping the
3170 # time before and after executing the request, we can at least have a time
3172 nvinfo_starttime = time.time()
3173 all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
3175 self.cfg.GetClusterName())
3176 nvinfo_endtime = time.time()
3178 if self.extra_lv_nodes and vg_name is not None:
3180 self.rpc.call_node_verify(self.extra_lv_nodes,
3181 {constants.NV_LVLIST: vg_name},
3182 self.cfg.GetClusterName())
3184 extra_lv_nvinfo = {}
3186 all_drbd_map = self.cfg.ComputeDRBDMap()
3188 feedback_fn("* Gathering disk information (%s nodes)" %
3189 len(self.my_node_names))
3190 instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
3193 feedback_fn("* Verifying configuration file consistency")
3195 # If not all nodes are being checked, we need to make sure the master node
3196 # and a non-checked vm_capable node are in the list.
3197 absent_nodes = set(self.all_node_info).difference(self.my_node_info)
3199 vf_nvinfo = all_nvinfo.copy()
3200 vf_node_info = list(self.my_node_info.values())
3201 additional_nodes = []
3202 if master_node not in self.my_node_info:
3203 additional_nodes.append(master_node)
3204 vf_node_info.append(self.all_node_info[master_node])
3205 # Add the first vm_capable node we find which is not included
3206 for node in absent_nodes:
3207 nodeinfo = self.all_node_info[node]
3208 if nodeinfo.vm_capable and not nodeinfo.offline:
3209 additional_nodes.append(node)
3210 vf_node_info.append(self.all_node_info[node])
3212 key = constants.NV_FILELIST
3213 vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
3214 {key: node_verify_param[key]},
3215 self.cfg.GetClusterName()))
3217 vf_nvinfo = all_nvinfo
3218 vf_node_info = self.my_node_info.values()
3220 self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
3222 feedback_fn("* Verifying node status")
3226 for node_i in node_data_list:
3228 nimg = node_image[node]
3232 feedback_fn("* Skipping offline node %s" % (node,))
3236 if node == master_node:
3238 elif node_i.master_candidate:
3239 ntype = "master candidate"
3240 elif node_i.drained:
3246 feedback_fn("* Verifying node %s (%s)" % (node, ntype))
3248 msg = all_nvinfo[node].fail_msg
3249 _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
3252 nimg.rpc_fail = True
3255 nresult = all_nvinfo[node].payload
3257 nimg.call_ok = self._VerifyNode(node_i, nresult)
3258 self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
3259 self._VerifyNodeNetwork(node_i, nresult)
3260 self._VerifyNodeUserScripts(node_i, nresult)
3261 self._VerifyOob(node_i, nresult)
3264 self._VerifyNodeLVM(node_i, nresult, vg_name)
3265 self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
3268 self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
3269 self._UpdateNodeInstances(node_i, nresult, nimg)
3270 self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
3271 self._UpdateNodeOS(node_i, nresult, nimg)
3273 if not nimg.os_fail:
3274 if refos_img is None:
3276 self._VerifyNodeOS(node_i, nimg, refos_img)
3277 self._VerifyNodeBridges(node_i, nresult, bridges)
3279 # Check whether all running instances are primary for the node. (This
3280 # can no longer be done from _VerifyInstance below, since some of the
3281 # wrong instances could be from other node groups.)
3282 non_primary_inst = set(nimg.instances).difference(nimg.pinst)
3284 for inst in non_primary_inst:
3285 # FIXME: investigate best way to handle offline insts
3286 if inst.admin_state == constants.ADMINST_OFFLINE:
3288 feedback_fn("* Skipping offline instance %s" % inst.name)
3291 test = inst in self.all_inst_info
3292 _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
3293 "instance should not run on node %s", node_i.name)
3294 _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
3295 "node is running unknown instance %s", inst)
3297 for node, result in extra_lv_nvinfo.items():
3298 self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
3299 node_image[node], vg_name)
3301 feedback_fn("* Verifying instance status")
3302 for instance in self.my_inst_names:
3304 feedback_fn("* Verifying instance %s" % instance)
3305 inst_config = self.my_inst_info[instance]
3306 self._VerifyInstance(instance, inst_config, node_image,
3308 inst_nodes_offline = []
3310 pnode = inst_config.primary_node
3311 pnode_img = node_image[pnode]
3312 _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
3313 constants.CV_ENODERPC, pnode, "instance %s, connection to"
3314 " primary node failed", instance)
3316 _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
3318 constants.CV_EINSTANCEBADNODE, instance,
3319 "instance is marked as running and lives on offline node %s",
3320 inst_config.primary_node)
3322 # If the instance is non-redundant we cannot survive losing its primary
3323 # node, so we are not N+1 compliant. On the other hand we have no disk
3324 # templates with more than one secondary so that situation is not well
3326 # FIXME: does not support file-backed instances
3327 if not inst_config.secondary_nodes:
3328 i_non_redundant.append(instance)
3330 _ErrorIf(len(inst_config.secondary_nodes) > 1,
3331 constants.CV_EINSTANCELAYOUT,
3332 instance, "instance has multiple secondary nodes: %s",
3333 utils.CommaJoin(inst_config.secondary_nodes),
3334 code=self.ETYPE_WARNING)
3336 if inst_config.disk_template in constants.DTS_INT_MIRROR:
3337 pnode = inst_config.primary_node
3338 instance_nodes = utils.NiceSort(inst_config.all_nodes)
3339 instance_groups = {}
3341 for node in instance_nodes:
3342 instance_groups.setdefault(self.all_node_info[node].group,
3346 "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
3347 # Sort so that we always list the primary node first.
3348 for group, nodes in sorted(instance_groups.items(),
3349 key=lambda (_, nodes): pnode in nodes,
3352 self._ErrorIf(len(instance_groups) > 1,
3353 constants.CV_EINSTANCESPLITGROUPS,
3354 instance, "instance has primary and secondary nodes in"
3355 " different groups: %s", utils.CommaJoin(pretty_list),
3356 code=self.ETYPE_WARNING)
3358 if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
3359 i_non_a_balanced.append(instance)
3361 for snode in inst_config.secondary_nodes:
3362 s_img = node_image[snode]
3363 _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
3364 snode, "instance %s, connection to secondary node failed",
3368 inst_nodes_offline.append(snode)
3370 # warn that the instance lives on offline nodes
3371 _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
3372 "instance has offline secondary node(s) %s",
3373 utils.CommaJoin(inst_nodes_offline))
3374 # ... or ghost/non-vm_capable nodes
3375 for node in inst_config.all_nodes:
3376 _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
3377 instance, "instance lives on ghost node %s", node)
3378 _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
3379 instance, "instance lives on non-vm_capable node %s", node)
3381 feedback_fn("* Verifying orphan volumes")
3382 reserved = utils.FieldSet(*cluster.reserved_lvs)
3384 # We will get spurious "unknown volume" warnings if any node of this group
3385 # is secondary for an instance whose primary is in another group. To avoid
3386 # them, we find these instances and add their volumes to node_vol_should.
3387 for inst in self.all_inst_info.values():
3388 for secondary in inst.secondary_nodes:
3389 if (secondary in self.my_node_info
3390 and inst.name not in self.my_inst_info):
3391 inst.MapLVsByNode(node_vol_should)
3394 self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
3396 if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
3397 feedback_fn("* Verifying N+1 Memory redundancy")
3398 self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
3400 feedback_fn("* Other Notes")
3402 feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
3403 % len(i_non_redundant))
3405 if i_non_a_balanced:
3406 feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
3407 % len(i_non_a_balanced))
3410 feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
3413 feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
3416 feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
3420 def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
3421 """Analyze the post-hooks' result
3423 This method analyses the hook result, handles it, and sends some
3424 nicely-formatted feedback back to the user.
3426 @param phase: one of L{constants.HOOKS_PHASE_POST} or
3427 L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
3428 @param hooks_results: the results of the multi-node hooks rpc call
3429 @param feedback_fn: function used to send feedback back to the caller
3430 @param lu_result: previous Exec result
3431 @return: the new Exec result, based on the previous result
3435 # We only really run POST phase hooks, only for non-empty groups,
3436 # and are only interested in their results
3437 if not self.my_node_names:
3440 elif phase == constants.HOOKS_PHASE_POST:
3441 # Used to change hooks' output to proper indentation
3442 feedback_fn("* Hooks Results")
3443 assert hooks_results, "invalid result from hooks"
3445 for node_name in hooks_results:
3446 res = hooks_results[node_name]
3448 test = msg and not res.offline
3449 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3450 "Communication failure in hooks execution: %s", msg)
3451 if res.offline or msg:
3452 # No need to investigate payload if node is offline or gave
3455 for script, hkr, output in res.payload:
3456 test = hkr == constants.HKR_FAIL
3457 self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
3458 "Script %s failed, output:", script)
3460 output = self._HOOKS_INDENT_RE.sub(" ", output)
3461 feedback_fn("%s" % output)
3467 class LUClusterVerifyDisks(NoHooksLU):
3468 """Verifies the cluster disks status.
3473 def ExpandNames(self):
3474 self.share_locks = _ShareAll()
3475 self.needed_locks = {
3476 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3479 def Exec(self, feedback_fn):
3480 group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
3482 # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
3483 return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
3484 for group in group_names])
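# Like LUClusterVerify, this LU only fans work out: the actual disk checks
# are performed by the per-group LUGroupVerifyDisks jobs submitted here.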
3487 class LUGroupVerifyDisks(NoHooksLU):
3488 """Verifies the status of all disks in a node group.
3493 def ExpandNames(self):
3494 # Raises errors.OpPrereqError on its own if group can't be found
3495 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
3497 self.share_locks = _ShareAll()
3498 self.needed_locks = {
3499 locking.LEVEL_INSTANCE: [],
3500 locking.LEVEL_NODEGROUP: [],
3501 locking.LEVEL_NODE: [],
3504 def DeclareLocks(self, level):
3505 if level == locking.LEVEL_INSTANCE:
3506 assert not self.needed_locks[locking.LEVEL_INSTANCE]
3508 # Lock instances optimistically, needs verification once node and group
3509 # locks have been acquired
3510 self.needed_locks[locking.LEVEL_INSTANCE] = \
3511 self.cfg.GetNodeGroupInstances(self.group_uuid)
3513 elif level == locking.LEVEL_NODEGROUP:
3514 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3516 self.needed_locks[locking.LEVEL_NODEGROUP] = \
3517 set([self.group_uuid] +
3518 # Lock all groups used by instances optimistically; this requires
3519 # going via the node before it's locked, requiring verification
3522 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3523 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
3525 elif level == locking.LEVEL_NODE:
3526 # This will only lock the nodes in the group to be verified which contain
3528 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3529 self._LockInstancesNodes()
3531 # Lock all nodes in group to be verified
3532 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
3533 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
3534 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3536 def CheckPrereq(self):
3537 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3538 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3539 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3541 assert self.group_uuid in owned_groups
3543 # Check if locked instances are still correct
3544 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
3546 # Get instance information
3547 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
3549 # Check if node groups for locked instances are still correct
3550 _CheckInstancesNodeGroups(self.cfg, self.instances,
3551 owned_groups, owned_nodes, self.group_uuid)
3553 def Exec(self, feedback_fn):
3554 """Verify integrity of cluster disks.
3556 @rtype: tuple of three items
3557 @return: a tuple of (dict of node-to-node_error, list of instances
3558 which need activate-disks, dict of instance: (node, volume) for
3563 res_instances = set()
3566 nv_dict = _MapInstanceDisksToNodes([inst
3567 for inst in self.instances.values()
3568 if inst.admin_state == constants.ADMINST_UP])
3571 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
3572 set(self.cfg.GetVmCapableNodeList()))
3574 node_lvs = self.rpc.call_lv_list(nodes, [])
3576 for (node, node_res) in node_lvs.items():
3577 if node_res.offline:
3580 msg = node_res.fail_msg
3582 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
3583 res_nodes[node] = msg
3586 for lv_name, (_, _, lv_online) in node_res.payload.items():
3587 inst = nv_dict.pop((node, lv_name), None)
3588 if not (lv_online or inst is None):
3589 res_instances.add(inst)
3591 # any leftover items in nv_dict are missing LVs, let's arrange the data
3593 for key, inst in nv_dict.iteritems():
3594 res_missing.setdefault(inst, []).append(list(key))
3596 return (res_nodes, list(res_instances), res_missing)
3599 class LUClusterRepairDiskSizes(NoHooksLU):
3600 """Verifies the cluster disks sizes.
3605 def ExpandNames(self):
3606 if self.op.instances:
3607 self.wanted_names = _GetWantedInstances(self, self.op.instances)
3608 self.needed_locks = {
3609 locking.LEVEL_NODE_RES: [],
3610 locking.LEVEL_INSTANCE: self.wanted_names,
3612 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
3614 self.wanted_names = None
3615 self.needed_locks = {
3616 locking.LEVEL_NODE_RES: locking.ALL_SET,
3617 locking.LEVEL_INSTANCE: locking.ALL_SET,
3619 self.share_locks = {
3620 locking.LEVEL_NODE_RES: 1,
3621 locking.LEVEL_INSTANCE: 0,
3624 def DeclareLocks(self, level):
3625 if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
3626 self._LockInstancesNodes(primary_only=True, level=level)
3628 def CheckPrereq(self):
3629 """Check prerequisites.
3631 This only checks the optional instance list against the existing names.
3634 if self.wanted_names is None:
3635 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
3637 self.wanted_instances = \
3638 map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
3640 def _EnsureChildSizes(self, disk):
3641 """Ensure children of the disk have the needed disk size.
3643 This is valid mainly for DRBD8 and fixes an issue where the
3644 children have smaller disk size.
3646 @param disk: an L{ganeti.objects.Disk} object
3649 if disk.dev_type == constants.LD_DRBD8:
3650 assert disk.children, "Empty children for DRBD8?"
3651 fchild = disk.children[0]
3652 mismatch = fchild.size < disk.size
3654 self.LogInfo("Child disk has size %d, parent %d, fixing",
3655 fchild.size, disk.size)
3656 fchild.size = disk.size
3658 # and we recurse on this child only, not on the metadev
3659 return self._EnsureChildSizes(fchild) or mismatch
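# Note: only children[0] (the DRBD8 data device) is recursed into; the meta
# device is deliberately skipped, so this only corrects the recorded size of
# the data child in the configuration.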
3663 def Exec(self, feedback_fn):
3664 """Verify the size of cluster disks.
3667 # TODO: check child disks too
3668 # TODO: check differences in size between primary/secondary nodes
3670 for instance in self.wanted_instances:
3671 pnode = instance.primary_node
3672 if pnode not in per_node_disks:
3673 per_node_disks[pnode] = []
3674 for idx, disk in enumerate(instance.disks):
3675 per_node_disks[pnode].append((instance, idx, disk))
3677 assert not (frozenset(per_node_disks.keys()) -
3678 self.owned_locks(locking.LEVEL_NODE_RES)), \
3679 "Not owning correct locks"
3680 assert not self.owned_locks(locking.LEVEL_NODE)
3683 for node, dskl in per_node_disks.items():
3684 newl = [v[2].Copy() for v in dskl]
3686 self.cfg.SetDiskID(dsk, node)
3687 result = self.rpc.call_blockdev_getsize(node, newl)
3689 self.LogWarning("Failure in blockdev_getsize call to node"
3690 " %s, ignoring", node)
3692 if len(result.payload) != len(dskl):
3693 logging.warning("Invalid result from node %s: len(dksl)=%d,"
3694 " result.payload=%s", node, len(dskl), result.payload)
3695 self.LogWarning("Invalid result from node %s, ignoring node results",
3698 for ((instance, idx, disk), size) in zip(dskl, result.payload):
3700 self.LogWarning("Disk %d of instance %s did not return size"
3701 " information, ignoring", idx, instance.name)
3703 if not isinstance(size, (int, long)):
3704 self.LogWarning("Disk %d of instance %s did not return valid"
3705 " size information, ignoring", idx, instance.name)
3708 if size != disk.size:
3709 self.LogInfo("Disk %d of instance %s has mismatched size,"
3710 " correcting: recorded %d, actual %d", idx,
3711 instance.name, disk.size, size)
3713 self.cfg.Update(instance, feedback_fn)
3714 changed.append((instance.name, idx, size))
3715 if self._EnsureChildSizes(disk):
3716 self.cfg.Update(instance, feedback_fn)
3717 changed.append((instance.name, idx, disk.size))
3721 class LUClusterRename(LogicalUnit):
3722 """Rename the cluster.
3725 HPATH = "cluster-rename"
3726 HTYPE = constants.HTYPE_CLUSTER
3728 def BuildHooksEnv(self):
3733 "OP_TARGET": self.cfg.GetClusterName(),
3734 "NEW_NAME": self.op.name,
3737 def BuildHooksNodes(self):
3738 """Build hooks nodes.
3741 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
3743 def CheckPrereq(self):
3744 """Verify that the passed name is a valid one.
3747 hostname = netutils.GetHostname(name=self.op.name,
3748 family=self.cfg.GetPrimaryIPFamily())
3750 new_name = hostname.name
3751 self.ip = new_ip = hostname.ip
3752 old_name = self.cfg.GetClusterName()
3753 old_ip = self.cfg.GetMasterIP()
3754 if new_name == old_name and new_ip == old_ip:
3755 raise errors.OpPrereqError("Neither the name nor the IP address of the"
3756 " cluster has changed",
3758 if new_ip != old_ip:
3759 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
3760 raise errors.OpPrereqError("The given cluster IP address (%s) is"
3761 " reachable on the network" %
3762 new_ip, errors.ECODE_NOTUNIQUE)
3764 self.op.name = new_name
3766 def Exec(self, feedback_fn):
3767 """Rename the cluster.
3770 clustername = self.op.name
3773 # shutdown the master IP
3774 master_params = self.cfg.GetMasterNetworkParameters()
3775 ems = self.cfg.GetUseExternalMipScript()
3776 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
3778 result.Raise("Could not disable the master role")
3781 cluster = self.cfg.GetClusterInfo()
3782 cluster.cluster_name = clustername
3783 cluster.master_ip = new_ip
3784 self.cfg.Update(cluster, feedback_fn)
3786 # update the known hosts file
3787 ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
3788 node_list = self.cfg.GetOnlineNodeList()
3790 node_list.remove(master_params.name)
3793 _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
3795 master_params.ip = new_ip
3796 result = self.rpc.call_node_activate_master_ip(master_params.name,
3798 msg = result.fail_msg
3800 self.LogWarning("Could not re-enable the master role on"
3801 " the master, please restart manually: %s", msg)
3806 def _ValidateNetmask(cfg, netmask):
3807 """Checks if a netmask is valid.
3809 @type cfg: L{config.ConfigWriter}
3810 @param cfg: The cluster configuration
3812 @param netmask: the netmask to be verified
3813 @raise errors.OpPrereqError: if the validation fails
3816 ip_family = cfg.GetPrimaryIPFamily()
3818 ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
3819 except errors.ProgrammerError:
3820 raise errors.OpPrereqError("Invalid primary ip family: %s." %
3822 if not ipcls.ValidateNetmask(netmask):
3823 raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
3827 class LUClusterSetParams(LogicalUnit):
3828 """Change the parameters of the cluster.
3831 HPATH = "cluster-modify"
3832 HTYPE = constants.HTYPE_CLUSTER
3835 def CheckArguments(self):
3839 if self.op.uid_pool:
3840 uidpool.CheckUidPool(self.op.uid_pool)
3842 if self.op.add_uids:
3843 uidpool.CheckUidPool(self.op.add_uids)
3845 if self.op.remove_uids:
3846 uidpool.CheckUidPool(self.op.remove_uids)
3848 if self.op.master_netmask is not None:
3849 _ValidateNetmask(self.cfg, self.op.master_netmask)
3851 if self.op.diskparams:
3852 for dt_params in self.op.diskparams.values():
3853 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
3855 def ExpandNames(self):
3856 # FIXME: in the future, modifying some other cluster params might not
3857 # require locking all nodes.
3858 self.needed_locks = {
3859 locking.LEVEL_NODE: locking.ALL_SET,
3860 locking.LEVEL_INSTANCE: locking.ALL_SET,
3861 locking.LEVEL_NODEGROUP: locking.ALL_SET,
3863 self.share_locks = {
3864 locking.LEVEL_NODE: 1,
3865 locking.LEVEL_INSTANCE: 1,
3866 locking.LEVEL_NODEGROUP: 1,
3869 def BuildHooksEnv(self):
3874 "OP_TARGET": self.cfg.GetClusterName(),
3875 "NEW_VG_NAME": self.op.vg_name,
3878 def BuildHooksNodes(self):
3879 """Build hooks nodes.
3882 mn = self.cfg.GetMasterNode()
3885 def CheckPrereq(self):
3886 """Check prerequisites.
3888 This checks whether the given parameters are consistent with each
3889 other and whether the given volume group is valid.
3892 if self.op.vg_name is not None and not self.op.vg_name:
3893 if self.cfg.HasAnyDiskOfType(constants.LD_LV):
3894 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
3895 " instances exist", errors.ECODE_INVAL)
3897 if self.op.drbd_helper is not None and not self.op.drbd_helper:
3898 if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
3899 raise errors.OpPrereqError("Cannot disable drbd helper while"
3900 " drbd-based instances exist",
3903 node_list = self.owned_locks(locking.LEVEL_NODE)
3905 # if vg_name is not None, check the given volume group on all nodes
3907 vglist = self.rpc.call_vg_list(node_list)
3908 for node in node_list:
3909 msg = vglist[node].fail_msg
3911 # ignoring down node
3912 self.LogWarning("Error while gathering data on node %s"
3913 " (ignoring node): %s", node, msg)
3915 vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
3917 constants.MIN_VG_SIZE)
3919 raise errors.OpPrereqError("Error on node '%s': %s" %
3920 (node, vgstatus), errors.ECODE_ENVIRON)
3922 if self.op.drbd_helper:
3923 # checks given drbd helper on all nodes
3924 helpers = self.rpc.call_drbd_helper(node_list)
3925 for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
3927 self.LogInfo("Not checking drbd helper on offline node %s", node)
3929 msg = helpers[node].fail_msg
3931 raise errors.OpPrereqError("Error checking drbd helper on node"
3932 " '%s': %s" % (node, msg),
3933 errors.ECODE_ENVIRON)
3934 node_helper = helpers[node].payload
3935 if node_helper != self.op.drbd_helper:
3936 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
3937 (node, node_helper), errors.ECODE_ENVIRON)
3939 self.cluster = cluster = self.cfg.GetClusterInfo()
3940 # validate params changes
3941 if self.op.beparams:
3942 objects.UpgradeBeParams(self.op.beparams)
3943 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
3944 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
3946 if self.op.ndparams:
3947 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3948 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
3950 # TODO: we need a more general way to handle resetting
3951 # cluster-level parameters to default values
3952 if self.new_ndparams["oob_program"] == "":
3953 self.new_ndparams["oob_program"] = \
3954 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
3956 if self.op.hv_state:
3957 new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
3958 self.cluster.hv_state_static)
3959 self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
3960 for hv, values in new_hv_state.items())
3962 if self.op.disk_state:
3963 new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
3964 self.cluster.disk_state_static)
3965 self.new_disk_state = \
3966 dict((storage, dict((name, cluster.SimpleFillDiskState(values))
3967 for name, values in svalues.items()))
3968 for storage, svalues in new_disk_state.items())
3971 self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
3974 all_instances = self.cfg.GetAllInstancesInfo().values()
3976 for group in self.cfg.GetAllNodeGroupsInfo().values():
3977 instances = frozenset([inst for inst in all_instances
3978 if compat.any(node in group.members
3979 for node in inst.all_nodes)])
3980 new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
3981 new = _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
3983 new_ipolicy, instances)
3985 violations.update(new)
3988 self.LogWarning("After the ipolicy change the following instances"
3989 " violate them: %s",
3990 utils.CommaJoin(violations))
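# Instances that would violate the new ipolicy only trigger the warning above;
# the parameter change itself is still applied.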
3992 if self.op.nicparams:
3993 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
3994 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
3995 objects.NIC.CheckParameterSyntax(self.new_nicparams)
3998 # check all instances for consistency
3999 for instance in self.cfg.GetAllInstancesInfo().values():
4000 for nic_idx, nic in enumerate(instance.nics):
4001 params_copy = copy.deepcopy(nic.nicparams)
4002 params_filled = objects.FillDict(self.new_nicparams, params_copy)
4004 # check parameter syntax
4006 objects.NIC.CheckParameterSyntax(params_filled)
4007 except errors.ConfigurationError, err:
4008 nic_errors.append("Instance %s, nic/%d: %s" %
4009 (instance.name, nic_idx, err))
4011 # if we're moving instances to routed, check that they have an ip
4012 target_mode = params_filled[constants.NIC_MODE]
4013 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
4014 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
4015 " address" % (instance.name, nic_idx))
4017 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
4018 "\n".join(nic_errors))
4020 # hypervisor list/parameters
4021 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
4022 if self.op.hvparams:
4023 for hv_name, hv_dict in self.op.hvparams.items():
4024 if hv_name not in self.new_hvparams:
4025 self.new_hvparams[hv_name] = hv_dict
4027 self.new_hvparams[hv_name].update(hv_dict)
4029 # disk template parameters
4030 self.new_diskparams = objects.FillDict(cluster.diskparams, {})
4031 if self.op.diskparams:
4032 for dt_name, dt_params in self.op.diskparams.items():
4033 if dt_name not in self.new_diskparams:
4034 self.new_diskparams[dt_name] = dt_params
4036 self.new_diskparams[dt_name].update(dt_params)
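# The merge is done per disk template, mirroring the hvparams handling above;
# e.g. diskparams={constants.DT_DRBD8: {"resync-rate": 2048}} (hypothetical
# key and value) would update only that single key of the DRBD8 defaults.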
4038 # os hypervisor parameters
4039 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
4041 for os_name, hvs in self.op.os_hvp.items():
4042 if os_name not in self.new_os_hvp:
4043 self.new_os_hvp[os_name] = hvs
4045 for hv_name, hv_dict in hvs.items():
4046 if hv_name not in self.new_os_hvp[os_name]:
4047 self.new_os_hvp[os_name][hv_name] = hv_dict
4049 self.new_os_hvp[os_name][hv_name].update(hv_dict)
4052 self.new_osp = objects.FillDict(cluster.osparams, {})
4053 if self.op.osparams:
4054 for os_name, osp in self.op.osparams.items():
4055 if os_name not in self.new_osp:
4056 self.new_osp[os_name] = {}
4058 self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
4061 if not self.new_osp[os_name]:
4062 # we removed all parameters
4063 del self.new_osp[os_name]
4065 # check the parameter validity (remote check)
4066 _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
4067 os_name, self.new_osp[os_name])
4069 # changes to the hypervisor list
4070 if self.op.enabled_hypervisors is not None:
4071 self.hv_list = self.op.enabled_hypervisors
4072 for hv in self.hv_list:
4073 # if the hypervisor doesn't already exist in the cluster
4074 # hvparams, we initialize it to empty, and then (in both
4075 # cases) we make sure to fill the defaults, as we might not
4076 # have a complete defaults list if the hypervisor wasn't enabled before
4078 if hv not in new_hvp:
4080 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
4081 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
4083 self.hv_list = cluster.enabled_hypervisors
4085 if self.op.hvparams or self.op.enabled_hypervisors is not None:
4086 # either the enabled list has changed, or the parameters have, validate
4087 for hv_name, hv_params in self.new_hvparams.items():
4088 if ((self.op.hvparams and hv_name in self.op.hvparams) or
4089 (self.op.enabled_hypervisors and
4090 hv_name in self.op.enabled_hypervisors)):
4091 # either this is a new hypervisor, or its parameters have changed
4092 hv_class = hypervisor.GetHypervisor(hv_name)
4093 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4094 hv_class.CheckParameterSyntax(hv_params)
4095 _CheckHVParams(self, node_list, hv_name, hv_params)
4098 # no need to check any newly-enabled hypervisors, since the
4099 # defaults have already been checked in the above code-block
4100 for os_name, os_hvp in self.new_os_hvp.items():
4101 for hv_name, hv_params in os_hvp.items():
4102 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
4103 # we need to fill in the new os_hvp on top of the actual hv_p
4104 cluster_defaults = self.new_hvparams.get(hv_name, {})
4105 new_osp = objects.FillDict(cluster_defaults, hv_params)
4106 hv_class = hypervisor.GetHypervisor(hv_name)
4107 hv_class.CheckParameterSyntax(new_osp)
4108 _CheckHVParams(self, node_list, hv_name, new_osp)
4110 if self.op.default_iallocator:
4111 alloc_script = utils.FindFile(self.op.default_iallocator,
4112 constants.IALLOCATOR_SEARCH_PATH,
4114 if alloc_script is None:
4115 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
4116 " specified" % self.op.default_iallocator,
4119 def Exec(self, feedback_fn):
4120 """Change the parameters of the cluster.
4123 if self.op.vg_name is not None:
4124 new_volume = self.op.vg_name
4127 if new_volume != self.cfg.GetVGName():
4128 self.cfg.SetVGName(new_volume)
4130 feedback_fn("Cluster LVM configuration already in desired"
4131 " state, not changing")
4132 if self.op.drbd_helper is not None:
4133 new_helper = self.op.drbd_helper
4136 if new_helper != self.cfg.GetDRBDHelper():
4137 self.cfg.SetDRBDHelper(new_helper)
4139 feedback_fn("Cluster DRBD helper already in desired state,"
4141 if self.op.hvparams:
4142 self.cluster.hvparams = self.new_hvparams
4144 self.cluster.os_hvp = self.new_os_hvp
4145 if self.op.enabled_hypervisors is not None:
4146 self.cluster.hvparams = self.new_hvparams
4147 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
4148 if self.op.beparams:
4149 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
4150 if self.op.nicparams:
4151 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
4153 self.cluster.ipolicy = self.new_ipolicy
4154 if self.op.osparams:
4155 self.cluster.osparams = self.new_osp
4156 if self.op.ndparams:
4157 self.cluster.ndparams = self.new_ndparams
4158 if self.op.diskparams:
4159 self.cluster.diskparams = self.new_diskparams
4160 if self.op.hv_state:
4161 self.cluster.hv_state_static = self.new_hv_state
4162 if self.op.disk_state:
4163 self.cluster.disk_state_static = self.new_disk_state
4165 if self.op.candidate_pool_size is not None:
4166 self.cluster.candidate_pool_size = self.op.candidate_pool_size
4167 # we need to update the pool size here, otherwise the save will fail
4168 _AdjustCandidatePool(self, [])
4170 if self.op.maintain_node_health is not None:
4171 if self.op.maintain_node_health and not constants.ENABLE_CONFD:
4172 feedback_fn("Note: CONFD was disabled at build time, node health"
4173 " maintenance is not useful (still enabling it)")
4174 self.cluster.maintain_node_health = self.op.maintain_node_health
4176 if self.op.prealloc_wipe_disks is not None:
4177 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
4179 if self.op.add_uids is not None:
4180 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
4182 if self.op.remove_uids is not None:
4183 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
4185 if self.op.uid_pool is not None:
4186 self.cluster.uid_pool = self.op.uid_pool
4188 if self.op.default_iallocator is not None:
4189 self.cluster.default_iallocator = self.op.default_iallocator
4191 if self.op.reserved_lvs is not None:
4192 self.cluster.reserved_lvs = self.op.reserved_lvs
4194 if self.op.use_external_mip_script is not None:
4195 self.cluster.use_external_mip_script = self.op.use_external_mip_script
4197 def helper_os(aname, mods, desc):
4199 lst = getattr(self.cluster, aname)
4200 for key, val in mods:
4201 if key == constants.DDM_ADD:
4203 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
4206 elif key == constants.DDM_REMOVE:
4210 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
4212 raise errors.ProgrammerError("Invalid modification '%s'" % key)
4214 if self.op.hidden_os:
4215 helper_os("hidden_os", self.op.hidden_os, "hidden")
4217 if self.op.blacklisted_os:
4218 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
4220 if self.op.master_netdev:
4221 master_params = self.cfg.GetMasterNetworkParameters()
4222 ems = self.cfg.GetUseExternalMipScript()
4223 feedback_fn("Shutting down master ip on the current netdev (%s)" %
4224 self.cluster.master_netdev)
4225 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4227 result.Raise("Could not disable the master ip")
4228 feedback_fn("Changing master_netdev from %s to %s" %
4229 (master_params.netdev, self.op.master_netdev))
4230 self.cluster.master_netdev = self.op.master_netdev
4232 if self.op.master_netmask:
4233 master_params = self.cfg.GetMasterNetworkParameters()
4234 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
4235 result = self.rpc.call_node_change_master_netmask(master_params.name,
4236 master_params.netmask,
4237 self.op.master_netmask,
4239 master_params.netdev)
4241 msg = "Could not change the master IP netmask: %s" % result.fail_msg
4244 self.cluster.master_netmask = self.op.master_netmask
4246 self.cfg.Update(self.cluster, feedback_fn)
4248 if self.op.master_netdev:
4249 master_params = self.cfg.GetMasterNetworkParameters()
4250 feedback_fn("Starting the master ip on the new master netdev (%s)" %
4251 self.op.master_netdev)
4252 ems = self.cfg.GetUseExternalMipScript()
4253 result = self.rpc.call_node_activate_master_ip(master_params.name,
4256 self.LogWarning("Could not re-enable the master ip on"
4257 " the master, please restart manually: %s",
4261 def _UploadHelper(lu, nodes, fname):
4262 """Helper for uploading a file and showing warnings.
4265 if os.path.exists(fname):
4266 result = lu.rpc.call_upload_file(nodes, fname)
4267 for to_node, to_result in result.items():
4268 msg = to_result.fail_msg
4270 msg = ("Copy of file %s to node %s failed: %s" %
4271 (fname, to_node, msg))
4272 lu.proc.LogWarning(msg)
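# Typical call, as used elsewhere in this module:
#   _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)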
4275 def _ComputeAncillaryFiles(cluster, redist):
4276 """Compute files external to Ganeti which need to be consistent.
4278 @type redist: boolean
4279 @param redist: Whether to include files which need to be redistributed
4282 # Compute files for all nodes
4284 constants.SSH_KNOWN_HOSTS_FILE,
4285 constants.CONFD_HMAC_KEY,
4286 constants.CLUSTER_DOMAIN_SECRET_FILE,
4287 constants.SPICE_CERT_FILE,
4288 constants.SPICE_CACERT_FILE,
4289 constants.RAPI_USERS_FILE,
4293 files_all.update(constants.ALL_CERT_FILES)
4294 files_all.update(ssconf.SimpleStore().GetFileList())
4296 # we need to ship at least the RAPI certificate
4297 files_all.add(constants.RAPI_CERT_FILE)
4299 if cluster.modify_etc_hosts:
4300 files_all.add(constants.ETC_HOSTS)
4302 # Files which are optional, these must:
4303 # - be present in one other category as well
4304 # - either exist or not exist on all nodes of that category (mc, vm all)
4306 constants.RAPI_USERS_FILE,
4309 # Files which should only be on master candidates
4313 files_mc.add(constants.CLUSTER_CONF_FILE)
4315 # FIXME: this should also be replicated, but Ganeti doesn't yet support replicating files_mc entries
4317 files_mc.add(constants.DEFAULT_MASTER_SETUP_SCRIPT)
4319 # Files which should only be on VM-capable nodes
4320 files_vm = set(filename
4321 for hv_name in cluster.enabled_hypervisors
4322 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
4324 files_opt |= set(filename
4325 for hv_name in cluster.enabled_hypervisors
4326 for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
4328 # Filenames in each category must be unique
4329 all_files_set = files_all | files_mc | files_vm
4330 assert (len(all_files_set) ==
4331 sum(map(len, [files_all, files_mc, files_vm]))), \
4332 "Found file listed in more than one file list"
4334 # Optional files must be present in one other category
4335 assert all_files_set.issuperset(files_opt), \
4336 "Optional file not in a different required list"
4338 return (files_all, files_opt, files_mc, files_vm)
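# Summary of the four sets returned: files_all must be on every node,
# files_opt may legitimately be absent, files_mc belong only on master
# candidates and files_vm only on VM-capable nodes.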
4341 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
4342 """Distribute additional files which are part of the cluster configuration.
4344 ConfigWriter takes care of distributing the config and ssconf files, but
4345 there are more files which should be distributed to all nodes. This function
4346 makes sure those are copied.
4348 @param lu: calling logical unit
4349 @param additional_nodes: list of nodes not in the config to distribute to
4350 @type additional_vm: boolean
4351 @param additional_vm: whether the additional nodes are vm-capable or not
4354 # Gather target nodes
4355 cluster = lu.cfg.GetClusterInfo()
4356 master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
4358 online_nodes = lu.cfg.GetOnlineNodeList()
4359 vm_nodes = lu.cfg.GetVmCapableNodeList()
4361 if additional_nodes is not None:
4362 online_nodes.extend(additional_nodes)
4364 vm_nodes.extend(additional_nodes)
4366 # Never distribute to master node
4367 for nodelist in [online_nodes, vm_nodes]:
4368 if master_info.name in nodelist:
4369 nodelist.remove(master_info.name)
4372 (files_all, _, files_mc, files_vm) = \
4373 _ComputeAncillaryFiles(cluster, True)
4375 # Never re-distribute configuration file from here
4376 assert not (constants.CLUSTER_CONF_FILE in files_all or
4377 constants.CLUSTER_CONF_FILE in files_vm)
4378 assert not files_mc, "Master candidates not handled in this function"
4381 (online_nodes, files_all),
4382 (vm_nodes, files_vm),
4386 for (node_list, files) in filemap:
4388 _UploadHelper(lu, node_list, fname)
4391 class LUClusterRedistConf(NoHooksLU):
4392 """Force the redistribution of cluster configuration.
4394 This is a very simple LU.
4399 def ExpandNames(self):
4400 self.needed_locks = {
4401 locking.LEVEL_NODE: locking.ALL_SET,
4403 self.share_locks[locking.LEVEL_NODE] = 1
4405 def Exec(self, feedback_fn):
4406 """Redistribute the configuration.
4409 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
4410 _RedistributeAncillaryFiles(self)
4413 class LUClusterActivateMasterIp(NoHooksLU):
4414 """Activate the master IP on the master node.
4417 def Exec(self, feedback_fn):
4418 """Activate the master IP.
4421 master_params = self.cfg.GetMasterNetworkParameters()
4422 ems = self.cfg.GetUseExternalMipScript()
4423 result = self.rpc.call_node_activate_master_ip(master_params.name,
4425 result.Raise("Could not activate the master IP")
4428 class LUClusterDeactivateMasterIp(NoHooksLU):
4429 """Deactivate the master IP on the master node.
4432 def Exec(self, feedback_fn):
4433 """Deactivate the master IP.
4436 master_params = self.cfg.GetMasterNetworkParameters()
4437 ems = self.cfg.GetUseExternalMipScript()
4438 result = self.rpc.call_node_deactivate_master_ip(master_params.name,
4440 result.Raise("Could not deactivate the master IP")
4443 def _WaitForSync(lu, instance, disks=None, oneshot=False):
4444 """Sleep and poll for an instance's disk to sync.
4447 if not instance.disks or disks is not None and not disks:
4450 disks = _ExpandCheckDisks(instance, disks)
4453 lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
4455 node = instance.primary_node
4458 lu.cfg.SetDiskID(dev, node)
4460 # TODO: Convert to utils.Retry
4463 degr_retries = 10 # in seconds, as we sleep 1 second each time
4467 cumul_degraded = False
4468 rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
4469 msg = rstats.fail_msg
4471 lu.LogWarning("Can't get any data from node %s: %s", node, msg)
4474 raise errors.RemoteError("Can't contact node %s for mirror data,"
4475 " aborting." % node)
4478 rstats = rstats.payload
4480 for i, mstat in enumerate(rstats):
4482 lu.LogWarning("Can't compute data for node %s/%s",
4483 node, disks[i].iv_name)
4486 cumul_degraded = (cumul_degraded or
4487 (mstat.is_degraded and mstat.sync_percent is None))
4488 if mstat.sync_percent is not None:
4490 if mstat.estimated_time is not None:
4491 rem_time = ("%s remaining (estimated)" %
4492 utils.FormatSeconds(mstat.estimated_time))
4493 max_time = mstat.estimated_time
4495 rem_time = "no time estimate"
4496 lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
4497 (disks[i].iv_name, mstat.sync_percent, rem_time))
4499 # if we're done but degraded, let's do a few small retries, to
4500 # make sure we see a stable and not transient situation; therefore
4501 # we force restart of the loop
4502 if (done or oneshot) and cumul_degraded and degr_retries > 0:
4503 logging.info("Degraded disks found, %d retries left", degr_retries)
4511 time.sleep(min(60, max_time))
4514 lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
4515 return not cumul_degraded
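# _WaitForSync thus returns True only if every polled disk ended up in a
# non-degraded state; callers can treat a False return as a sync failure.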
4518 def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
4519 """Check that mirrors are not degraded.
4521 The ldisk parameter, if True, will change the test from the
4522 is_degraded attribute (which represents overall non-ok status for
4523 the device(s)) to the ldisk (representing the local storage status).
4526 lu.cfg.SetDiskID(dev, node)
4530 if on_primary or dev.AssembleOnSecondary():
4531 rstats = lu.rpc.call_blockdev_find(node, dev)
4532 msg = rstats.fail_msg
4534 lu.LogWarning("Can't find disk on node %s: %s", node, msg)
4536 elif not rstats.payload:
4537 lu.LogWarning("Can't find disk on node %s", node)
4541 result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
4543 result = result and not rstats.payload.is_degraded
4546 for child in dev.children:
4547 result = result and _CheckDiskConsistency(lu, instance, child, node,
4553 class LUOobCommand(NoHooksLU):
4554 """Logical unit for OOB handling.
4558 _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
4560 def ExpandNames(self):
4561 """Gather locks we need.
4564 if self.op.node_names:
4565 self.op.node_names = _GetWantedNodes(self, self.op.node_names)
4566 lock_names = self.op.node_names
4568 lock_names = locking.ALL_SET
4570 self.needed_locks = {
4571 locking.LEVEL_NODE: lock_names,
4574 def CheckPrereq(self):
4575 """Check prerequisites.
4578 - the node exists in the configuration
4581 Any errors are signaled by raising errors.OpPrereqError.
4585 self.master_node = self.cfg.GetMasterNode()
4587 assert self.op.power_delay >= 0.0
4589 if self.op.node_names:
4590 if (self.op.command in self._SKIP_MASTER and
4591 self.master_node in self.op.node_names):
4592 master_node_obj = self.cfg.GetNodeInfo(self.master_node)
4593 master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
4595 if master_oob_handler:
4596 additional_text = ("run '%s %s %s' if you want to operate on the"
4597 " master regardless") % (master_oob_handler,
4601 additional_text = "it does not support out-of-band operations"
4603 raise errors.OpPrereqError(("Operating on the master node %s is not"
4604 " allowed for %s; %s") %
4605 (self.master_node, self.op.command,
4606 additional_text), errors.ECODE_INVAL)
4608 self.op.node_names = self.cfg.GetNodeList()
4609 if self.op.command in self._SKIP_MASTER:
4610 self.op.node_names.remove(self.master_node)
4612 if self.op.command in self._SKIP_MASTER:
4613 assert self.master_node not in self.op.node_names
4615 for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
4617 raise errors.OpPrereqError("Node %s not found" % node_name,
4620 self.nodes.append(node)
4622 if (not self.op.ignore_status and
4623 (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
4624 raise errors.OpPrereqError(("Cannot power off node %s because it is"
4625 " not marked offline") % node_name,
4628 def Exec(self, feedback_fn):
4629 """Execute OOB and return result if we expect any.
4632 master_node = self.master_node
4635 for idx, node in enumerate(utils.NiceSort(self.nodes,
4636 key=lambda node: node.name)):
4637 node_entry = [(constants.RS_NORMAL, node.name)]
4638 ret.append(node_entry)
4640 oob_program = _SupportsOob(self.cfg, node)
4643 node_entry.append((constants.RS_UNAVAIL, None))
4646 logging.info("Executing out-of-band command '%s' using '%s' on %s",
4647 self.op.command, oob_program, node.name)
4648 result = self.rpc.call_run_oob(master_node, oob_program,
4649 self.op.command, node.name,
4653 self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4654 node.name, result.fail_msg)
4655 node_entry.append((constants.RS_NODATA, None))
4658 self._CheckPayload(result)
4659 except errors.OpExecError, err:
4660 self.LogWarning("Payload returned by node '%s' is not valid: %s",
4662 node_entry.append((constants.RS_NODATA, None))
4664 if self.op.command == constants.OOB_HEALTH:
4665 # For health we should log important events
4666 for item, status in result.payload:
4667 if status in [constants.OOB_STATUS_WARNING,
4668 constants.OOB_STATUS_CRITICAL]:
4669 self.LogWarning("Item '%s' on node '%s' has status '%s'",
4670 item, node.name, status)
4672 if self.op.command == constants.OOB_POWER_ON:
4674 elif self.op.command == constants.OOB_POWER_OFF:
4675 node.powered = False
4676 elif self.op.command == constants.OOB_POWER_STATUS:
4677 powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4678 if powered != node.powered:
4679 logging.warning(("Recorded power state (%s) of node '%s' does not"
4680 " match actual power state (%s)"), node.powered,
4683 # For configuration changing commands we should update the node
4684 if self.op.command in (constants.OOB_POWER_ON,
4685 constants.OOB_POWER_OFF):
4686 self.cfg.Update(node, feedback_fn)
4688 node_entry.append((constants.RS_NORMAL, result.payload))
4690 if (self.op.command == constants.OOB_POWER_ON and
4691 idx < len(self.nodes) - 1):
4692 time.sleep(self.op.power_delay)
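# The sleep above staggers successive power-on commands by op.power_delay
# seconds, presumably so that many machines are not switched on at the same
# instant.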
4696 def _CheckPayload(self, result):
4697 """Checks if the payload is valid.
4699 @param result: RPC result
4700 @raises errors.OpExecError: If payload is not valid
4704 if self.op.command == constants.OOB_HEALTH:
4705 if not isinstance(result.payload, list):
4706 errs.append("command 'health' is expected to return a list but got %s" %
4707 type(result.payload))
4709 for item, status in result.payload:
4710 if status not in constants.OOB_STATUSES:
4711 errs.append("health item '%s' has invalid status '%s'" %
4714 if self.op.command == constants.OOB_POWER_STATUS:
4715 if not isinstance(result.payload, dict):
4716 errs.append("power-status is expected to return a dict but got %s" %
4717 type(result.payload))
4719 if self.op.command in [
4720 constants.OOB_POWER_ON,
4721 constants.OOB_POWER_OFF,
4722 constants.OOB_POWER_CYCLE,
4724 if result.payload is not None:
4725 errs.append("%s is expected to not return payload but got '%s'" %
4726 (self.op.command, result.payload))
4729 raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4730 utils.CommaJoin(errs))
4733 class _OsQuery(_QueryBase):
4734 FIELDS = query.OS_FIELDS
4736 def ExpandNames(self, lu):
4737 # Lock all nodes in shared mode
4738 # Temporary removal of locks, should be reverted later
4739 # TODO: reintroduce locks when they are lighter-weight
4740 lu.needed_locks = {}
4741 #self.share_locks[locking.LEVEL_NODE] = 1
4742 #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4744 # The following variables interact with _QueryBase._GetNames
4746 self.wanted = self.names
4748 self.wanted = locking.ALL_SET
4750 self.do_locking = self.use_locking
4752 def DeclareLocks(self, lu, level):
4756 def _DiagnoseByOS(rlist):
4757 """Remaps a per-node return list into an a per-os per-node dictionary
4759 @param rlist: a map with node names as keys and OS objects as values
4762 @return: a dictionary with osnames as keys and as value another
4763 map, with nodes as keys and tuples of (path, status, diagnose,
4764 variants, parameters, api_versions) as values, eg::
4766 {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4767 (/srv/..., False, "invalid api")],
4768 "node2": [(/srv/..., True, "", [], [])]}
4773 # we build here the list of nodes that didn't fail the RPC (at RPC
4774 # level), so that nodes with a non-responding node daemon don't
4775 # make all OSes invalid
4776 good_nodes = [node_name for node_name in rlist
4777 if not rlist[node_name].fail_msg]
4778 for node_name, nr in rlist.items():
4779 if nr.fail_msg or not nr.payload:
4781 for (name, path, status, diagnose, variants,
4782 params, api_versions) in nr.payload:
4783 if name not in all_os:
4784 # build a list of nodes for this os containing empty lists
4785 # for each node in node_list
4787 for nname in good_nodes:
4788 all_os[name][nname] = []
4789 # convert params from [name, help] to (name, help)
4790 params = [tuple(v) for v in params]
4791 all_os[name][node_name].append((path, status, diagnose,
4792 variants, params, api_versions))
4795 def _GetQueryData(self, lu):
4796 """Computes the list of nodes and their attributes.
4799 # Locking is not used
4800 assert not (compat.any(lu.glm.is_owned(level)
4801 for level in locking.LEVELS
4802 if level != locking.LEVEL_CLUSTER) or
4803 self.do_locking or self.use_locking)
4805 valid_nodes = [node.name
4806 for node in lu.cfg.GetAllNodesInfo().values()
4807 if not node.offline and node.vm_capable]
4808 pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4809 cluster = lu.cfg.GetClusterInfo()
4813 for (os_name, os_data) in pol.items():
4814 info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4815 hidden=(os_name in cluster.hidden_os),
4816 blacklisted=(os_name in cluster.blacklisted_os))
4820 api_versions = set()
4822 for idx, osl in enumerate(os_data.values()):
4823 info.valid = bool(info.valid and osl and osl[0][1])
4827 (node_variants, node_params, node_api) = osl[0][3:6]
4830 variants.update(node_variants)
4831 parameters.update(node_params)
4832 api_versions.update(node_api)
4834 # Filter out inconsistent values
4835 variants.intersection_update(node_variants)
4836 parameters.intersection_update(node_params)
4837 api_versions.intersection_update(node_api)
4839 info.variants = list(variants)
4840 info.parameters = list(parameters)
4841 info.api_versions = list(api_versions)
4843 data[os_name] = info
4845 # Prepare data in requested order
4846 return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4850 class LUOsDiagnose(NoHooksLU):
4851 """Logical unit for OS diagnose/query.
4857 def _BuildFilter(fields, names):
4858 """Builds a filter for querying OSes.
4861 name_filter = qlang.MakeSimpleFilter("name", names)
4863 # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
4864 # respective field is not requested
4865 status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
4866 for fname in ["hidden", "blacklisted"]
4867 if fname not in fields]
4868 if "valid" not in fields:
4869 status_filter.append([qlang.OP_TRUE, "valid"])
4872 status_filter.insert(0, qlang.OP_AND)
4874 status_filter = None
4876 if name_filter and status_filter:
4877 return [qlang.OP_AND, name_filter, status_filter]
4881 return status_filter
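# Illustrative, hypothetical call: _BuildFilter(["name", "valid"], ["lenny"])
# yields a filter matching only the OS named "lenny" while still hiding hidden
# and blacklisted entries, since those two fields were not requested.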
4883 def CheckArguments(self):
4884 self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
4885 self.op.output_fields, False)
4887 def ExpandNames(self):
4888 self.oq.ExpandNames(self)
4890 def Exec(self, feedback_fn):
4891 return self.oq.OldStyleQuery(self)
4894 class LUNodeRemove(LogicalUnit):
4895 """Logical unit for removing a node.
4898 HPATH = "node-remove"
4899 HTYPE = constants.HTYPE_NODE
4901 def BuildHooksEnv(self):
4906 "OP_TARGET": self.op.node_name,
4907 "NODE_NAME": self.op.node_name,
4910 def BuildHooksNodes(self):
4911 """Build hooks nodes.
4913 This doesn't run on the target node in the pre phase as a failed
4914 node would then be impossible to remove.
4917 all_nodes = self.cfg.GetNodeList()
4919 all_nodes.remove(self.op.node_name)
4922 return (all_nodes, all_nodes)
4924 def CheckPrereq(self):
4925 """Check prerequisites.
4928 - the node exists in the configuration
4929 - it does not have primary or secondary instances
4930 - it's not the master
4932 Any errors are signaled by raising errors.OpPrereqError.
4935 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4936 node = self.cfg.GetNodeInfo(self.op.node_name)
4937 assert node is not None
4939 masternode = self.cfg.GetMasterNode()
4940 if node.name == masternode:
4941 raise errors.OpPrereqError("Node is the master node, failover to another"
4942 " node is required", errors.ECODE_INVAL)
4944 for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
4945 if node.name in instance.all_nodes:
4946 raise errors.OpPrereqError("Instance %s is still running on the node,"
4947 " please remove first" % instance_name,
4949 self.op.node_name = node.name
4952 def Exec(self, feedback_fn):
4953 """Removes the node from the cluster.
4957 logging.info("Stopping the node daemon and removing configs from node %s",
4960 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
4962 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
4965 # Promote nodes to master candidate as needed
4966 _AdjustCandidatePool(self, exceptions=[node.name])
4967 self.context.RemoveNode(node.name)
4969 # Run post hooks on the node before it's removed
4970 _RunPostHook(self, node.name)
4972 result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
4973 msg = result.fail_msg
4975 self.LogWarning("Errors encountered on the remote node while leaving"
4976 " the cluster: %s", msg)
4978 # Remove node from our /etc/hosts
4979 if self.cfg.GetClusterInfo().modify_etc_hosts:
4980 master_node = self.cfg.GetMasterNode()
4981 result = self.rpc.call_etc_hosts_modify(master_node,
4982 constants.ETC_HOSTS_REMOVE,
4984 result.Raise("Can't update hosts file with new host data")
4985 _RedistributeAncillaryFiles(self)
4988 class _NodeQuery(_QueryBase):
4989 FIELDS = query.NODE_FIELDS
4991 def ExpandNames(self, lu):
4992 lu.needed_locks = {}
4993 lu.share_locks = _ShareAll()
4996 self.wanted = _GetWantedNodes(lu, self.names)
4998 self.wanted = locking.ALL_SET
5000 self.do_locking = (self.use_locking and
5001 query.NQ_LIVE in self.requested_data)
5004 # If any non-static field is requested we need to lock the nodes
5005 lu.needed_locks[locking.LEVEL_NODE] = self.wanted
5007 def DeclareLocks(self, lu, level):
5010 def _GetQueryData(self, lu):
5011 """Computes the list of nodes and their attributes.
5014 all_info = lu.cfg.GetAllNodesInfo()
5016 nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
5018 # Gather data as requested
5019 if query.NQ_LIVE in self.requested_data:
5020 # filter out non-vm_capable nodes
5021 toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
5023 node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
5024 [lu.cfg.GetHypervisorType()])
5025 live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
5026 for (name, nresult) in node_data.items()
5027 if not nresult.fail_msg and nresult.payload)
5031 if query.NQ_INST in self.requested_data:
5032 node_to_primary = dict([(name, set()) for name in nodenames])
5033 node_to_secondary = dict([(name, set()) for name in nodenames])
5035 inst_data = lu.cfg.GetAllInstancesInfo()
5037 for inst in inst_data.values():
5038 if inst.primary_node in node_to_primary:
5039 node_to_primary[inst.primary_node].add(inst.name)
5040 for secnode in inst.secondary_nodes:
5041 if secnode in node_to_secondary:
5042 node_to_secondary[secnode].add(inst.name)
5044 node_to_primary = None
5045 node_to_secondary = None
5047 if query.NQ_OOB in self.requested_data:
5048 oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
5049 for name, node in all_info.iteritems())
5053 if query.NQ_GROUP in self.requested_data:
5054 groups = lu.cfg.GetAllNodeGroupsInfo()
5058 return query.NodeQueryData([all_info[name] for name in nodenames],
5059 live_data, lu.cfg.GetMasterNode(),
5060 node_to_primary, node_to_secondary, groups,
5061 oob_support, lu.cfg.GetClusterInfo())
5064 class LUNodeQuery(NoHooksLU):
5065 """Logical unit for querying nodes.
5068 # pylint: disable=W0142
5071 def CheckArguments(self):
5072 self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
5073 self.op.output_fields, self.op.use_locking)
5075 def ExpandNames(self):
5076 self.nq.ExpandNames(self)
5078 def DeclareLocks(self, level):
5079 self.nq.DeclareLocks(self, level)
5081 def Exec(self, feedback_fn):
5082 return self.nq.OldStyleQuery(self)
5085 class LUNodeQueryvols(NoHooksLU):
5086 """Logical unit for getting volumes on node(s).
5090 _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
5091 _FIELDS_STATIC = utils.FieldSet("node")
5093 def CheckArguments(self):
5094 _CheckOutputFields(static=self._FIELDS_STATIC,
5095 dynamic=self._FIELDS_DYNAMIC,
5096 selected=self.op.output_fields)
5098 def ExpandNames(self):
5099 self.share_locks = _ShareAll()
5100 self.needed_locks = {}
5102 if not self.op.nodes:
5103 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5105 self.needed_locks[locking.LEVEL_NODE] = \
5106 _GetWantedNodes(self, self.op.nodes)
5108 def Exec(self, feedback_fn):
5109 """Computes the list of nodes and their attributes.
5112 nodenames = self.owned_locks(locking.LEVEL_NODE)
5113 volumes = self.rpc.call_node_volumes(nodenames)
5115 ilist = self.cfg.GetAllInstancesInfo()
5116 vol2inst = _MapInstanceDisksToNodes(ilist.values())
5119 for node in nodenames:
5120 nresult = volumes[node]
5123 msg = nresult.fail_msg
5125 self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
5128 node_vols = sorted(nresult.payload,
5129 key=operator.itemgetter("dev"))
5131 for vol in node_vols:
5133 for field in self.op.output_fields:
5136 elif field == "phys":
5140 elif field == "name":
5142 elif field == "size":
5143 val = int(float(vol["size"]))
5144 elif field == "instance":
5145 val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
5147 raise errors.ParameterError(field)
5148 node_output.append(str(val))
5150 output.append(node_output)
5155 class LUNodeQueryStorage(NoHooksLU):
5156 """Logical unit for getting information on storage units on node(s).
5159 _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
5162 def CheckArguments(self):
5163 _CheckOutputFields(static=self._FIELDS_STATIC,
5164 dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
5165 selected=self.op.output_fields)
5167 def ExpandNames(self):
5168 self.share_locks = _ShareAll()
5169 self.needed_locks = {}
5172 self.needed_locks[locking.LEVEL_NODE] = \
5173 _GetWantedNodes(self, self.op.nodes)
5175 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5177 def Exec(self, feedback_fn):
5178 """Computes the list of nodes and their attributes.
5181 self.nodes = self.owned_locks(locking.LEVEL_NODE)
5183 # Always get name to sort by
5184 if constants.SF_NAME in self.op.output_fields:
5185 fields = self.op.output_fields[:]
5186 else:
5187 fields = [constants.SF_NAME] + self.op.output_fields
5189 # Never ask for node or type as it's only known to the LU
5190 for extra in [constants.SF_NODE, constants.SF_TYPE]:
5191 while extra in fields:
5192 fields.remove(extra)
5194 field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
5195 name_idx = field_idx[constants.SF_NAME]
5197 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5198 data = self.rpc.call_storage_list(self.nodes,
5199 self.op.storage_type, st_args,
5200 self.op.name, fields)
5204 for node in utils.NiceSort(self.nodes):
5205 nresult = data[node]
5209 msg = nresult.fail_msg
5211 self.LogWarning("Can't get storage data from node %s: %s", node, msg)
5214 rows = dict([(row[name_idx], row) for row in nresult.payload])
5216 for name in utils.NiceSort(rows.keys()):
5221 for field in self.op.output_fields:
5222 if field == constants.SF_NODE:
5224 elif field == constants.SF_TYPE:
5225 val = self.op.storage_type
5226 elif field in field_idx:
5227 val = row[field_idx[field]]
5229 raise errors.ParameterError(field)
5238 class _InstanceQuery(_QueryBase):
5239 FIELDS = query.INSTANCE_FIELDS
5241 def ExpandNames(self, lu):
5242 lu.needed_locks = {}
5243 lu.share_locks = _ShareAll()
5246 self.wanted = _GetWantedInstances(lu, self.names)
5248 self.wanted = locking.ALL_SET
5250 self.do_locking = (self.use_locking and
5251 query.IQ_LIVE in self.requested_data)
5253 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5254 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
5255 lu.needed_locks[locking.LEVEL_NODE] = []
5256 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5258 self.do_grouplocks = (self.do_locking and
5259 query.IQ_NODES in self.requested_data)
5261 def DeclareLocks(self, lu, level):
5263 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
5264 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
5266 # Lock all groups used by instances optimistically; this requires going
5267 # via the node before it's locked, requiring verification later on
5268 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
5270 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
5271 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
5272 elif level == locking.LEVEL_NODE:
5273 lu._LockInstancesNodes() # pylint: disable=W0212
5276 def _CheckGroupLocks(lu):
5277 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
5278 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
5280 # Check if node groups for locked instances are still correct
5281 for instance_name in owned_instances:
5282 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
5284 def _GetQueryData(self, lu):
5285 """Computes the list of instances and their attributes.
5288 if self.do_grouplocks:
5289 self._CheckGroupLocks(lu)
5291 cluster = lu.cfg.GetClusterInfo()
5292 all_info = lu.cfg.GetAllInstancesInfo()
5294 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
5296 instance_list = [all_info[name] for name in instance_names]
5297 nodes = frozenset(itertools.chain(*(inst.all_nodes
5298 for inst in instance_list)))
5299 hv_list = list(set([inst.hypervisor for inst in instance_list]))
5302 wrongnode_inst = set()
5304 # Gather data as requested
5305 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
5307 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
5309 result = node_data[name]
5311 # offline nodes will be in both lists
5312 assert result.fail_msg
5313 offline_nodes.append(name)
5315 bad_nodes.append(name)
5316 elif result.payload:
5317 for inst in result.payload:
5318 if inst in all_info:
5319 if all_info[inst].primary_node == name:
5320 live_data.update(result.payload)
5322 wrongnode_inst.add(inst)
5324 # orphan instance; we don't list it here as we don't
5325 # handle this case yet in the output of instance listing
5326 logging.warning("Orphan instance '%s' found on node %s",
5328 # else no instance is alive
5332 if query.IQ_DISKUSAGE in self.requested_data:
5333 disk_usage = dict((inst.name,
5334 _ComputeDiskSize(inst.disk_template,
5335 [{constants.IDISK_SIZE: disk.size}
5336 for disk in inst.disks]))
5337 for inst in instance_list)
5341 if query.IQ_CONSOLE in self.requested_data:
5343 for inst in instance_list:
5344 if inst.name in live_data:
5345 # Instance is running
5346 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
5348 consinfo[inst.name] = None
5349 assert set(consinfo.keys()) == set(instance_names)
5353 if query.IQ_NODES in self.requested_data:
5354 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
5356 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
5357 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
5358 for uuid in set(map(operator.attrgetter("group"),
5364 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
5365 disk_usage, offline_nodes, bad_nodes,
5366 live_data, wrongnode_inst, consinfo,
5370 class LUQuery(NoHooksLU):
5371 """Query for resources/items of a certain kind.
5374 # pylint: disable=W0142
5377 def CheckArguments(self):
5378 qcls = _GetQueryImplementation(self.op.what)
5380 self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
5382 def ExpandNames(self):
5383 self.impl.ExpandNames(self)
5385 def DeclareLocks(self, level):
5386 self.impl.DeclareLocks(self, level)
5388 def Exec(self, feedback_fn):
5389 return self.impl.NewStyleQuery(self)
5392 class LUQueryFields(NoHooksLU):
5393 """Query for resources/items of a certain kind.
5396 # pylint: disable=W0142
5399 def CheckArguments(self):
5400 self.qcls = _GetQueryImplementation(self.op.what)
5402 def ExpandNames(self):
5403 self.needed_locks = {}
5405 def Exec(self, feedback_fn):
5406 return query.QueryFields(self.qcls.FIELDS, self.op.fields)
5409 class LUNodeModifyStorage(NoHooksLU):
5410 """Logical unit for modifying a storage volume on a node.
5415 def CheckArguments(self):
5416 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5418 storage_type = self.op.storage_type
5421 modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
5423 raise errors.OpPrereqError("Storage units of type '%s' can not be"
5424 " modified" % storage_type,
5427 diff = set(self.op.changes.keys()) - modifiable
5429 raise errors.OpPrereqError("The following fields can not be modified for"
5430 " storage units of type '%s': %r" %
5431 (storage_type, list(diff)),
5434 def ExpandNames(self):
5435 self.needed_locks = {
5436 locking.LEVEL_NODE: self.op.node_name,
5439 def Exec(self, feedback_fn):
5440 """Computes the list of nodes and their attributes.
5443 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
5444 result = self.rpc.call_storage_modify(self.op.node_name,
5445 self.op.storage_type, st_args,
5446 self.op.name, self.op.changes)
5447 result.Raise("Failed to modify storage unit '%s' on %s" %
5448 (self.op.name, self.op.node_name))
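# Hypothetical example: an opcode with storage_type=constants.ST_LVM_PV and
# changes={constants.SF_ALLOCATABLE: False} would mark the given physical
# volume as no longer allocatable on that node.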
5451 class LUNodeAdd(LogicalUnit):
5452 """Logical unit for adding node to the cluster.
5456 HTYPE = constants.HTYPE_NODE
5457 _NFLAGS = ["master_capable", "vm_capable"]
5459 def CheckArguments(self):
5460 self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
5461 # validate/normalize the node name
5462 self.hostname = netutils.GetHostname(name=self.op.node_name,
5463 family=self.primary_ip_family)
5464 self.op.node_name = self.hostname.name
5466 if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
5467 raise errors.OpPrereqError("Cannot readd the master node",
5470 if self.op.readd and self.op.group:
5471 raise errors.OpPrereqError("Cannot pass a node group when a node is"
5472 " being readded", errors.ECODE_INVAL)
5474 def BuildHooksEnv(self):
5477 This will run on all nodes before, and on all nodes + the new node after.
5481 "OP_TARGET": self.op.node_name,
5482 "NODE_NAME": self.op.node_name,
5483 "NODE_PIP": self.op.primary_ip,
5484 "NODE_SIP": self.op.secondary_ip,
5485 "MASTER_CAPABLE": str(self.op.master_capable),
5486 "VM_CAPABLE": str(self.op.vm_capable),
5489 def BuildHooksNodes(self):
5490 """Build hooks nodes.
5493 # Exclude added node
5494 pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
5495 post_nodes = pre_nodes + [self.op.node_name, ]
5497 return (pre_nodes, post_nodes)
5499 def CheckPrereq(self):
5500 """Check prerequisites.
5503 - the new node is not already in the config
5505 - its parameters (single/dual homed) match the cluster
5507 Any errors are signaled by raising errors.OpPrereqError.
5511 hostname = self.hostname
5512 node = hostname.name
5513 primary_ip = self.op.primary_ip = hostname.ip
5514 if self.op.secondary_ip is None:
5515 if self.primary_ip_family == netutils.IP6Address.family:
5516 raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
5517 " IPv4 address must be given as secondary",
5519 self.op.secondary_ip = primary_ip
5521 secondary_ip = self.op.secondary_ip
5522 if not netutils.IP4Address.IsValid(secondary_ip):
5523 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5524 " address" % secondary_ip, errors.ECODE_INVAL)
5526 node_list = cfg.GetNodeList()
5527 if not self.op.readd and node in node_list:
5528 raise errors.OpPrereqError("Node %s is already in the configuration" %
5529 node, errors.ECODE_EXISTS)
5530 elif self.op.readd and node not in node_list:
5531 raise errors.OpPrereqError("Node %s is not in the configuration" % node,
5534 self.changed_primary_ip = False
5536 for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
5537 if self.op.readd and node == existing_node_name:
5538 if existing_node.secondary_ip != secondary_ip:
5539 raise errors.OpPrereqError("Readded node doesn't have the same IP"
5540 " address configuration as before",
5542 if existing_node.primary_ip != primary_ip:
5543 self.changed_primary_ip = True
5547 if (existing_node.primary_ip == primary_ip or
5548 existing_node.secondary_ip == primary_ip or
5549 existing_node.primary_ip == secondary_ip or
5550 existing_node.secondary_ip == secondary_ip):
5551 raise errors.OpPrereqError("New node ip address(es) conflict with"
5552 " existing node %s" % existing_node.name,
5553 errors.ECODE_NOTUNIQUE)
5555 # After this 'if' block, None is no longer a valid value for the
5556 # _capable op attributes
5558 old_node = self.cfg.GetNodeInfo(node)
5559 assert old_node is not None, "Can't retrieve locked node %s" % node
5560 for attr in self._NFLAGS:
5561 if getattr(self.op, attr) is None:
5562 setattr(self.op, attr, getattr(old_node, attr))
5564 for attr in self._NFLAGS:
5565 if getattr(self.op, attr) is None:
5566 setattr(self.op, attr, True)
5568 if self.op.readd and not self.op.vm_capable:
5569 pri, sec = cfg.GetNodeInstances(node)
5571 raise errors.OpPrereqError("Node %s being re-added with vm_capable"
5572 " flag set to false, but it already holds"
5573 " instances" % node,
5576 # check that the type of the node (single versus dual homed) is the
5577 # same as for the master
5578 myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
5579 master_singlehomed = myself.secondary_ip == myself.primary_ip
5580 newbie_singlehomed = secondary_ip == primary_ip
5581 if master_singlehomed != newbie_singlehomed:
5582 if master_singlehomed:
5583 raise errors.OpPrereqError("The master has no secondary ip but the"
5584 " new node has one",
5587 raise errors.OpPrereqError("The master has a secondary ip but the"
5588 " new node doesn't have one",
5591 # checks reachability
5592 if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
5593 raise errors.OpPrereqError("Node not reachable by ping",
5594 errors.ECODE_ENVIRON)
5596 if not newbie_singlehomed:
5597 # check reachability from my secondary ip to newbie's secondary ip
5598 if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
5599 source=myself.secondary_ip):
5600 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5601 " based ping to node daemon port",
5602 errors.ECODE_ENVIRON)
5609 if self.op.master_capable:
5610 self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
5612 self.master_candidate = False
5615 self.new_node = old_node
5617 node_group = cfg.LookupNodeGroup(self.op.group)
5618 self.new_node = objects.Node(name=node,
5619 primary_ip=primary_ip,
5620 secondary_ip=secondary_ip,
5621 master_candidate=self.master_candidate,
5622 offline=False, drained=False,
5625 if self.op.ndparams:
5626 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
5628 if self.op.hv_state:
5629 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
5631 if self.op.disk_state:
5632 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
5634 # TODO: If we need to have multiple DnsOnlyRunner we probably should make
5635 # it a property on the base class.
5636 result = rpc.DnsOnlyRunner().call_version([node])[node]
5637 result.Raise("Can't get version information from node %s" % node)
5638 if constants.PROTOCOL_VERSION == result.payload:
5639 logging.info("Communication to node %s fine, sw version %s match",
5640 node, result.payload)
5642 raise errors.OpPrereqError("Version mismatch master version %s,"
5643 " node version %s" %
5644 (constants.PROTOCOL_VERSION, result.payload),
5645 errors.ECODE_ENVIRON)
5647 def Exec(self, feedback_fn):
5648 """Adds the new node to the cluster.
5651 new_node = self.new_node
5652 node = new_node.name
5654 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
5657 # We are adding a new node, so we assume it is powered
5658 new_node.powered = True
5660 # for re-adds, reset the offline/drained/master-candidate flags;
5661 # we need to reset here, otherwise offline would prevent RPC calls
5662 # later in the procedure; this also means that if the re-add
5663 # fails, we are left with a non-offlined, broken node
5665 new_node.drained = new_node.offline = False # pylint: disable=W0201
5666 self.LogInfo("Readding a node, the offline/drained flags were reset")
5667 # if we demote the node, we do cleanup later in the procedure
5668 new_node.master_candidate = self.master_candidate
5669 if self.changed_primary_ip:
5670 new_node.primary_ip = self.op.primary_ip
5672 # copy the master/vm_capable flags
5673 for attr in self._NFLAGS:
5674 setattr(new_node, attr, getattr(self.op, attr))
5676 # notify the user about any possible mc promotion
5677 if new_node.master_candidate:
5678 self.LogInfo("Node will be a master candidate")
5680 if self.op.ndparams:
5681 new_node.ndparams = self.op.ndparams
5682 else:
5683 new_node.ndparams = {}
5685 if self.op.hv_state:
5686 new_node.hv_state_static = self.new_hv_state
5688 if self.op.disk_state:
5689 new_node.disk_state_static = self.new_disk_state
5691 # Add node to our /etc/hosts, and add key to known_hosts
5692 if self.cfg.GetClusterInfo().modify_etc_hosts:
5693 master_node = self.cfg.GetMasterNode()
5694 result = self.rpc.call_etc_hosts_modify(master_node,
5695 constants.ETC_HOSTS_ADD,
5698 result.Raise("Can't update hosts file with new host data")
5700 if new_node.secondary_ip != new_node.primary_ip:
5701 _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
5704 node_verify_list = [self.cfg.GetMasterNode()]
5705 node_verify_param = {
5706 constants.NV_NODELIST: ([node], {}),
5707 # TODO: do a node-net-test as well?
5710 result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5711 self.cfg.GetClusterName())
5712 for verifier in node_verify_list:
5713 result[verifier].Raise("Cannot communicate with node %s" % verifier)
5714 nl_payload = result[verifier].payload[constants.NV_NODELIST]
5715 if nl_payload:
5716 for failed in nl_payload:
5717 feedback_fn("ssh/hostname verification failed"
5718 " (checking from %s): %s" %
5719 (verifier, nl_payload[failed]))
5720 raise errors.OpExecError("ssh/hostname verification failed")
5722 if self.op.readd:
5723 _RedistributeAncillaryFiles(self)
5724 self.context.ReaddNode(new_node)
5725 # make sure we redistribute the config
5726 self.cfg.Update(new_node, feedback_fn)
5727 # and make sure the new node will not have old files around
5728 if not new_node.master_candidate:
5729 result = self.rpc.call_node_demote_from_mc(new_node.name)
5730 msg = result.fail_msg
5731 if msg:
5732 self.LogWarning("Node failed to demote itself from master"
5733 " candidate status: %s" % msg)
5734 else:
5735 _RedistributeAncillaryFiles(self, additional_nodes=[node],
5736 additional_vm=self.op.vm_capable)
5737 self.context.AddNode(new_node, self.proc.GetECId())
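# Illustrative sketch, not part of the original module and unused by any LU:
# re-runs the ssh/hostname verification that LUNodeAdd.Exec performs from the
# master, assuming the same call_node_verify payload layout used above. The
# helper name is hypothetical.
def _ExampleVerifyNodeSsh(lu, node):
  """Return True if the master reports no ssh/hostname failures for node."""
  master = lu.cfg.GetMasterNode()
  node_verify_param = {constants.NV_NODELIST: ([node], {})}
  result = lu.rpc.call_node_verify([master], node_verify_param,
                                   lu.cfg.GetClusterName())
  result[master].Raise("Cannot communicate with node %s" % master)
  # an empty NV_NODELIST payload means no ssh/hostname failures were reported
  return not result[master].payload[constants.NV_NODELIST]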
5740 class LUNodeSetParams(LogicalUnit):
5741 """Modifies the parameters of a node.
5743 @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5744 to the node role (as _ROLE_*)
5745 @cvar _R2F: a dictionary from node role to tuples of flags
5746 @cvar _FLAGS: a list of attribute names corresponding to the flags
5749 HPATH = "node-modify"
5750 HTYPE = constants.HTYPE_NODE
5752 (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5754 (True, False, False): _ROLE_CANDIDATE,
5755 (False, True, False): _ROLE_DRAINED,
5756 (False, False, True): _ROLE_OFFLINE,
5757 (False, False, False): _ROLE_REGULAR,
5759 _R2F = dict((v, k) for k, v in _F2R.items())
5760 _FLAGS = ["master_candidate", "drained", "offline"]
5762 def CheckArguments(self):
5763 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5764 all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5765 self.op.master_capable, self.op.vm_capable,
5766 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
5768 if all_mods.count(None) == len(all_mods):
5769 raise errors.OpPrereqError("Please pass at least one modification",
5771 if all_mods.count(True) > 1:
5772 raise errors.OpPrereqError("Can't set the node into more than one"
5773 " state at the same time",
5776 # Boolean value that tells us whether we might be demoting from MC
5777 self.might_demote = (self.op.master_candidate == False or
5778 self.op.offline == True or
5779 self.op.drained == True or
5780 self.op.master_capable == False)
5782 if self.op.secondary_ip:
5783 if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5784 raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5785 " address" % self.op.secondary_ip,
5788 self.lock_all = self.op.auto_promote and self.might_demote
5789 self.lock_instances = self.op.secondary_ip is not None
5791 def _InstanceFilter(self, instance):
5792 """Filter for getting affected instances.
5795 return (instance.disk_template in constants.DTS_INT_MIRROR and
5796 self.op.node_name in instance.all_nodes)
5798 def ExpandNames(self):
5800 self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
5802 self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5804 # Since modifying a node can have severe effects on currently running
5805 # operations, the resource lock is acquired at least in shared mode
5806 self.needed_locks[locking.LEVEL_NODE_RES] = \
5807 self.needed_locks[locking.LEVEL_NODE]
5809 # Get node resource and instance locks in shared mode; they are not used
5810 # for anything but read-only access
5811 self.share_locks[locking.LEVEL_NODE_RES] = 1
5812 self.share_locks[locking.LEVEL_INSTANCE] = 1
5814 if self.lock_instances:
5815 self.needed_locks[locking.LEVEL_INSTANCE] = \
5816 frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
5818 def BuildHooksEnv(self):
5821 This runs on the master node.
5825 "OP_TARGET": self.op.node_name,
5826 "MASTER_CANDIDATE": str(self.op.master_candidate),
5827 "OFFLINE": str(self.op.offline),
5828 "DRAINED": str(self.op.drained),
5829 "MASTER_CAPABLE": str(self.op.master_capable),
5830 "VM_CAPABLE": str(self.op.vm_capable),
5833 def BuildHooksNodes(self):
5834 """Build hooks nodes.
5837 nl = [self.cfg.GetMasterNode(), self.op.node_name]
5840 def CheckPrereq(self):
5841 """Check prerequisites.
5843 This only checks the instance list against the existing names.
5846 node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5848 if self.lock_instances:
5849 affected_instances = \
5850 self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
5852 # Verify instance locks
5853 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
5854 wanted_instances = frozenset(affected_instances.keys())
5855 if wanted_instances - owned_instances:
5856 raise errors.OpPrereqError("Instances affected by changing node %s's"
5857 " secondary IP address have changed since"
5858 " locks were acquired, wanted '%s', have"
5859 " '%s'; retry the operation" %
5861 utils.CommaJoin(wanted_instances),
5862 utils.CommaJoin(owned_instances)),
5865 affected_instances = None
5867 if (self.op.master_candidate is not None or
5868 self.op.drained is not None or
5869 self.op.offline is not None):
5870 # we can't change the master's node flags
5871 if self.op.node_name == self.cfg.GetMasterNode():
5872 raise errors.OpPrereqError("The master role can be changed"
5873 " only via master-failover",
5876 if self.op.master_candidate and not node.master_capable:
5877 raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5878 " it a master candidate" % node.name,
5881 if self.op.vm_capable == False:
5882 (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5883 if ipri or isec:
5884 raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
5885 " the vm_capable flag" % node.name,
5888 if node.master_candidate and self.might_demote and not self.lock_all:
5889 assert not self.op.auto_promote, "auto_promote set but lock_all not"
5890 # check if after removing the current node, we're missing master
5891 # candidates
5892 (mc_remaining, mc_should, _) = \
5893 self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5894 if mc_remaining < mc_should:
5895 raise errors.OpPrereqError("Not enough master candidates, please"
5896 " pass auto promote option to allow"
5897 " promotion", errors.ECODE_STATE)
5899 self.old_flags = old_flags = (node.master_candidate,
5900 node.drained, node.offline)
5901 assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5902 self.old_role = old_role = self._F2R[old_flags]
5904 # Check for ineffective changes
5905 for attr in self._FLAGS:
5906 if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5907 self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5908 setattr(self.op, attr, None)
5910 # Past this point, any flag change to False means a transition
5911 # away from the respective state, as only real changes are kept
5913 # TODO: We might query the real power state if it supports OOB
5914 if _SupportsOob(self.cfg, node):
5915 if self.op.offline is False and not (node.powered or
5916 self.op.powered == True):
5917 raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5918 " offline status can be reset") %
5920 elif self.op.powered is not None:
5921 raise errors.OpPrereqError(("Unable to change powered state for node %s"
5922 " as it does not support out-of-band"
5923 " handling") % self.op.node_name)
5925 # If the node is being de-offlined or un-drained, promote it to master candidate if needed
5926 if (self.op.drained == False or self.op.offline == False or
5927 (self.op.master_capable and not node.master_capable)):
5928 if _DecideSelfPromotion(self):
5929 self.op.master_candidate = True
5930 self.LogInfo("Auto-promoting node to master candidate")
5932 # If we're no longer master capable, we'll demote ourselves from MC
5933 if self.op.master_capable == False and node.master_candidate:
5934 self.LogInfo("Demoting from master candidate")
5935 self.op.master_candidate = False
5938 assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
5939 if self.op.master_candidate:
5940 new_role = self._ROLE_CANDIDATE
5941 elif self.op.drained:
5942 new_role = self._ROLE_DRAINED
5943 elif self.op.offline:
5944 new_role = self._ROLE_OFFLINE
5945 elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
5946 # False is still in new flags, which means we're un-setting (the
5947 # only) True flag; the node goes back to the regular role
5948 new_role = self._ROLE_REGULAR
5949 else: # no new flags were given, keep the old role
5950 new_role = old_role
5952 self.new_role = new_role
5954 if old_role == self._ROLE_OFFLINE and new_role != old_role:
5955 # Trying to transition out of offline status
5956 result = self.rpc.call_version([node.name])[node.name]
5957 if result.fail_msg:
5958 raise errors.OpPrereqError("Node %s is being de-offlined but fails"
5959 " to report its version: %s" %
5960 (node.name, result.fail_msg),
5963 self.LogWarning("Transitioning node from offline to online state"
5964 " without using re-add. Please make sure the node"
5967 if self.op.secondary_ip:
5968 # Ok even without locking, because this can't be changed by any LU
5969 master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
5970 master_singlehomed = master.secondary_ip == master.primary_ip
5971 if master_singlehomed and self.op.secondary_ip:
5972 raise errors.OpPrereqError("Cannot change the secondary ip on a single"
5973 " homed cluster", errors.ECODE_INVAL)
5975 assert not (frozenset(affected_instances) -
5976 self.owned_locks(locking.LEVEL_INSTANCE))
5978 if node.offline:
5979 if affected_instances:
5980 raise errors.OpPrereqError("Cannot change secondary IP address:"
5981 " offline node has instances (%s)"
5982 " configured to use it" %
5983 utils.CommaJoin(affected_instances.keys()))
5984 else:
5985 # On online nodes, check that no instances are running, and that
5986 # the node has the new ip and we can reach it.
5987 for instance in affected_instances.values():
5988 _CheckInstanceState(self, instance, INSTANCE_DOWN,
5989 msg="cannot change secondary ip")
5991 _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
5992 if master.name != node.name:
5993 # check reachability from master secondary ip to new secondary ip
5994 if not netutils.TcpPing(self.op.secondary_ip,
5995 constants.DEFAULT_NODED_PORT,
5996 source=master.secondary_ip):
5997 raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5998 " based ping to node daemon port",
5999 errors.ECODE_ENVIRON)
6001 if self.op.ndparams:
6002 new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
6003 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
6004 self.new_ndparams = new_ndparams
6006 if self.op.hv_state:
6007 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
6008 self.node.hv_state_static)
6010 if self.op.disk_state:
6011 self.new_disk_state = \
6012 _MergeAndVerifyDiskState(self.op.disk_state,
6013 self.node.disk_state_static)
6015 def Exec(self, feedback_fn):
6020 old_role = self.old_role
6021 new_role = self.new_role
6025 if self.op.ndparams:
6026 node.ndparams = self.new_ndparams
6028 if self.op.powered is not None:
6029 node.powered = self.op.powered
6031 if self.op.hv_state:
6032 node.hv_state_static = self.new_hv_state
6034 if self.op.disk_state:
6035 node.disk_state_static = self.new_disk_state
6037 for attr in ["master_capable", "vm_capable"]:
6038 val = getattr(self.op, attr)
6039 if val is not None:
6040 setattr(node, attr, val)
6041 result.append((attr, str(val)))
6043 if new_role != old_role:
6044 # Tell the node to demote itself, if no longer MC and not offline
6045 if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
6046 msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
6047 if msg:
6048 self.LogWarning("Node failed to demote itself: %s", msg)
6050 new_flags = self._R2F[new_role]
6051 for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
6052 if of != nf:
6053 result.append((desc, str(nf)))
6054 (node.master_candidate, node.drained, node.offline) = new_flags
6056 # we locked all nodes, we adjust the CP before updating this node
6057 if self.lock_all:
6058 _AdjustCandidatePool(self, [node.name])
6060 if self.op.secondary_ip:
6061 node.secondary_ip = self.op.secondary_ip
6062 result.append(("secondary_ip", self.op.secondary_ip))
6064 # this will trigger configuration file update, if needed
6065 self.cfg.Update(node, feedback_fn)
6067 # this will trigger job queue propagation or cleanup if the mc
6068 # flag changed
6069 if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
6070 self.context.ReaddNode(node)
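# Illustrative sketch, unused by the module: how the _F2R/_R2F tables
# documented in LUNodeSetParams translate between flag tuples and roles, e.g.
# _ExampleNodeRole(True, False, False) == LUNodeSetParams._ROLE_CANDIDATE.
# The helper name is hypothetical and only the four valid combinations are
# accepted (anything else raises KeyError).
def _ExampleNodeRole(master_candidate, drained, offline):
  """Return the _ROLE_* constant matching a (mc, drained, offline) tuple."""
  # pylint: disable=W0212
  return LUNodeSetParams._F2R[(master_candidate, drained, offline)]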
6075 class LUNodePowercycle(NoHooksLU):
6076 """Powercycles a node.
6081 def CheckArguments(self):
6082 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6083 if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
6084 raise errors.OpPrereqError("The node is the master and the force"
6085 " parameter was not set",
6088 def ExpandNames(self):
6089 """Locking for PowercycleNode.
6091 This is a last-resort option and shouldn't block on other
6092 jobs. Therefore, we grab no locks.
6095 self.needed_locks = {}
6097 def Exec(self, feedback_fn):
6101 result = self.rpc.call_node_powercycle(self.op.node_name,
6102 self.cfg.GetHypervisorType())
6103 result.Raise("Failed to schedule the reboot")
6104 return result.payload
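# Illustrative sketch, unused by the module: what a caller would submit to
# drive the LU above. It assumes opcodes.OpNodePowercycle carries the
# node_name and force fields read by LUNodePowercycle; the node name is made
# up.
def _ExamplePowercycleOpcode():
  """Build an opcode that powercycles a node, even if it is the master."""
  return opcodes.OpNodePowercycle(node_name="node1.example.com", force=True)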
6107 class LUClusterQuery(NoHooksLU):
6108 """Query cluster configuration.
6113 def ExpandNames(self):
6114 self.needed_locks = {}
6116 def Exec(self, feedback_fn):
6117 """Return cluster config.
6120 cluster = self.cfg.GetClusterInfo()
6123 # Filter just for enabled hypervisors
6124 for os_name, hv_dict in cluster.os_hvp.items():
6125 os_hvp[os_name] = {}
6126 for hv_name, hv_params in hv_dict.items():
6127 if hv_name in cluster.enabled_hypervisors:
6128 os_hvp[os_name][hv_name] = hv_params
6130 # Convert ip_family to ip_version
6131 primary_ip_version = constants.IP4_VERSION
6132 if cluster.primary_ip_family == netutils.IP6Address.family:
6133 primary_ip_version = constants.IP6_VERSION
6136 "software_version": constants.RELEASE_VERSION,
6137 "protocol_version": constants.PROTOCOL_VERSION,
6138 "config_version": constants.CONFIG_VERSION,
6139 "os_api_version": max(constants.OS_API_VERSIONS),
6140 "export_version": constants.EXPORT_VERSION,
6141 "architecture": runtime.GetArchInfo(),
6142 "name": cluster.cluster_name,
6143 "master": cluster.master_node,
6144 "default_hypervisor": cluster.primary_hypervisor,
6145 "enabled_hypervisors": cluster.enabled_hypervisors,
6146 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
6147 for hypervisor_name in cluster.enabled_hypervisors]),
6149 "beparams": cluster.beparams,
6150 "osparams": cluster.osparams,
6151 "ipolicy": cluster.ipolicy,
6152 "nicparams": cluster.nicparams,
6153 "ndparams": cluster.ndparams,
6154 "candidate_pool_size": cluster.candidate_pool_size,
6155 "master_netdev": cluster.master_netdev,
6156 "master_netmask": cluster.master_netmask,
6157 "use_external_mip_script": cluster.use_external_mip_script,
6158 "volume_group_name": cluster.volume_group_name,
6159 "drbd_usermode_helper": cluster.drbd_usermode_helper,
6160 "file_storage_dir": cluster.file_storage_dir,
6161 "shared_file_storage_dir": cluster.shared_file_storage_dir,
6162 "maintain_node_health": cluster.maintain_node_health,
6163 "ctime": cluster.ctime,
6164 "mtime": cluster.mtime,
6165 "uuid": cluster.uuid,
6166 "tags": list(cluster.GetTags()),
6167 "uid_pool": cluster.uid_pool,
6168 "default_iallocator": cluster.default_iallocator,
6169 "reserved_lvs": cluster.reserved_lvs,
6170 "primary_ip_version": primary_ip_version,
6171 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
6172 "hidden_os": cluster.hidden_os,
6173 "blacklisted_os": cluster.blacklisted_os,
6179 class LUClusterConfigQuery(NoHooksLU):
6180 """Return configuration values.
6185 def CheckArguments(self):
6186 self.cq = _ClusterQuery(None, self.op.output_fields, False)
6188 def ExpandNames(self):
6189 self.cq.ExpandNames(self)
6191 def DeclareLocks(self, level):
6192 self.cq.DeclareLocks(self, level)
6194 def Exec(self, feedback_fn):
6195 result = self.cq.OldStyleQuery(self)
6197 assert len(result) == 1
6202 class _ClusterQuery(_QueryBase):
6203 FIELDS = query.CLUSTER_FIELDS
6205 #: Do not sort (there is only one item)
6208 def ExpandNames(self, lu):
6209 lu.needed_locks = {}
6211 # The following variables interact with _QueryBase._GetNames
6212 self.wanted = locking.ALL_SET
6213 self.do_locking = self.use_locking
6215 if self.do_locking:
6216 raise errors.OpPrereqError("Can not use locking for cluster queries",
6219 def DeclareLocks(self, lu, level):
6222 def _GetQueryData(self, lu):
6223 """Computes the list of nodes and their attributes.
6226 # Locking is not used
6227 assert not (compat.any(lu.glm.is_owned(level)
6228 for level in locking.LEVELS
6229 if level != locking.LEVEL_CLUSTER) or
6230 self.do_locking or self.use_locking)
6232 if query.CQ_CONFIG in self.requested_data:
6233 cluster = lu.cfg.GetClusterInfo()
6235 cluster = NotImplemented
6237 if query.CQ_QUEUE_DRAINED in self.requested_data:
6238 drain_flag = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
6240 drain_flag = NotImplemented
6242 if query.CQ_WATCHER_PAUSE in self.requested_data:
6243 watcher_pause = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
6245 watcher_pause = NotImplemented
6247 return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
6250 class LUInstanceActivateDisks(NoHooksLU):
6251 """Bring up an instance's disks.
6256 def ExpandNames(self):
6257 self._ExpandAndLockInstance()
6258 self.needed_locks[locking.LEVEL_NODE] = []
6259 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6261 def DeclareLocks(self, level):
6262 if level == locking.LEVEL_NODE:
6263 self._LockInstancesNodes()
6265 def CheckPrereq(self):
6266 """Check prerequisites.
6268 This checks that the instance is in the cluster.
6271 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6272 assert self.instance is not None, \
6273 "Cannot retrieve locked instance %s" % self.op.instance_name
6274 _CheckNodeOnline(self, self.instance.primary_node)
6276 def Exec(self, feedback_fn):
6277 """Activate the disks.
6280 disks_ok, disks_info = \
6281 _AssembleInstanceDisks(self, self.instance,
6282 ignore_size=self.op.ignore_size)
6283 if not disks_ok:
6284 raise errors.OpExecError("Cannot activate block devices")
6286 return disks_info
6289 def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
6291 """Prepare the block devices for an instance.
6293 This sets up the block devices on all nodes.
6295 @type lu: L{LogicalUnit}
6296 @param lu: the logical unit on whose behalf we execute
6297 @type instance: L{objects.Instance}
6298 @param instance: the instance for whose disks we assemble
6299 @type disks: list of L{objects.Disk} or None
6300 @param disks: which disks to assemble (or all, if None)
6301 @type ignore_secondaries: boolean
6302 @param ignore_secondaries: if true, errors on secondary nodes
6303 won't result in an error return from the function
6304 @type ignore_size: boolean
6305 @param ignore_size: if true, the current known size of the disk
6306 will not be used during the disk activation, useful for cases
6307 when the size is wrong
6308 @return: a pair (disks_ok, device_info); device_info is a list of
6309 (host, instance_visible_name, node_visible_name) tuples with the
6310 mapping from node devices to instance devices
6313 device_info = []
6314 disks_ok = True
6315 iname = instance.name
6316 disks = _ExpandCheckDisks(instance, disks)
6318 # With the two passes mechanism we try to reduce the window of
6319 # opportunity for the race condition of switching DRBD to primary
6320 # before handshaking occurred, but we do not eliminate it
6322 # The proper fix would be to wait (with some limits) until the
6323 # connection has been made and drbd transitions from WFConnection
6324 # into any other network-connected state (Connected, SyncTarget,
6325 # SyncSource, etc.)
6327 # 1st pass, assemble on all nodes in secondary mode
6328 for idx, inst_disk in enumerate(disks):
6329 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6330 if ignore_size:
6331 node_disk = node_disk.Copy()
6332 node_disk.UnsetSize()
6333 lu.cfg.SetDiskID(node_disk, node)
6334 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6336 msg = result.fail_msg
6337 if msg:
6338 lu.proc.LogWarning("Could not prepare block device %s on node %s"
6339 " (is_primary=False, pass=1): %s",
6340 inst_disk.iv_name, node, msg)
6341 if not ignore_secondaries:
6342 disks_ok = False
6344 # FIXME: race condition on drbd migration to primary
6346 # 2nd pass, do only the primary node
6347 for idx, inst_disk in enumerate(disks):
6350 for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
6351 if node != instance.primary_node:
6352 continue
6353 if ignore_size:
6354 node_disk = node_disk.Copy()
6355 node_disk.UnsetSize()
6356 lu.cfg.SetDiskID(node_disk, node)
6357 result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
6359 msg = result.fail_msg
6360 if msg:
6361 lu.proc.LogWarning("Could not prepare block device %s on node %s"
6362 " (is_primary=True, pass=2): %s",
6363 inst_disk.iv_name, node, msg)
6364 disks_ok = False
6365 else:
6366 dev_path = result.payload
6368 device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
6370 # leave the disks configured for the primary node
6371 # this is a workaround that would be better fixed by
6372 # improving the logical/physical id handling
6374 lu.cfg.SetDiskID(disk, instance.primary_node)
6376 return disks_ok, device_info
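# Illustrative sketch, unused by the module: how a caller consumes the
# (disks_ok, device_info) pair returned by _AssembleInstanceDisks above. The
# helper name is hypothetical.
def _ExampleLogAssembledDisks(lu, instance):
  """Assemble an instance's disks and log where they became visible."""
  disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
  if not disks_ok:
    raise errors.OpExecError("Cannot activate block devices")
  for node, iv_name, dev_path in device_info:
    # each entry maps an instance-visible disk name to a node device path
    logging.info("Disk %s of %s is visible on %s as %s",
                 iv_name, instance.name, node, dev_path)
  return device_info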
6379 def _StartInstanceDisks(lu, instance, force):
6380 """Start the disks of an instance.
6383 disks_ok, _ = _AssembleInstanceDisks(lu, instance,
6384 ignore_secondaries=force)
6385 if not disks_ok:
6386 _ShutdownInstanceDisks(lu, instance)
6387 if force is not None and not force:
6388 lu.proc.LogWarning("", hint="If the message above refers to a"
6389 " secondary node,"
6390 " you can retry the operation using '--force'.")
6391 raise errors.OpExecError("Disk consistency error")
6394 class LUInstanceDeactivateDisks(NoHooksLU):
6395 """Shutdown an instance's disks.
6400 def ExpandNames(self):
6401 self._ExpandAndLockInstance()
6402 self.needed_locks[locking.LEVEL_NODE] = []
6403 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6405 def DeclareLocks(self, level):
6406 if level == locking.LEVEL_NODE:
6407 self._LockInstancesNodes()
6409 def CheckPrereq(self):
6410 """Check prerequisites.
6412 This checks that the instance is in the cluster.
6415 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6416 assert self.instance is not None, \
6417 "Cannot retrieve locked instance %s" % self.op.instance_name
6419 def Exec(self, feedback_fn):
6420 """Deactivate the disks
6423 instance = self.instance
6424 if self.op.force:
6425 _ShutdownInstanceDisks(self, instance)
6426 else:
6427 _SafeShutdownInstanceDisks(self, instance)
6430 def _SafeShutdownInstanceDisks(lu, instance, disks=None):
6431 """Shutdown block devices of an instance.
6433 This function checks that the instance is not running before calling
6434 _ShutdownInstanceDisks.
6437 _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
6438 _ShutdownInstanceDisks(lu, instance, disks=disks)
6441 def _ExpandCheckDisks(instance, disks):
6442 """Return the instance disks selected by the disks list
6444 @type disks: list of L{objects.Disk} or None
6445 @param disks: selected disks
6446 @rtype: list of L{objects.Disk}
6447 @return: selected instance disks to act on
6450 if disks is None:
6451 return instance.disks
6453 if not set(disks).issubset(instance.disks):
6454 raise errors.ProgrammerError("Can only act on disks belonging to the"
6459 def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
6460 """Shutdown block devices of an instance.
6462 This does the shutdown on all nodes of the instance.
6464 If ignore_primary is false, errors on the primary node are fatal and
6465 make this function report failure
6469 disks = _ExpandCheckDisks(instance, disks)
6471 for disk in disks:
6472 for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
6473 lu.cfg.SetDiskID(top_disk, node)
6474 result = lu.rpc.call_blockdev_shutdown(node, top_disk)
6475 msg = result.fail_msg
6477 lu.LogWarning("Could not shutdown block device %s on node %s: %s",
6478 disk.iv_name, node, msg)
6479 if ((node == instance.primary_node and not ignore_primary) or
6480 (node != instance.primary_node and not result.offline)):
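# Illustrative sketch, unused by the module: the disks argument of the
# shutdown helpers above selects a subset of instance.disks, while None means
# all disks. The helper name is hypothetical.
def _ExampleShutdownFirstDiskOnly(lu, instance):
  """Shut down only the first disk, refusing if the instance is running."""
  _SafeShutdownInstanceDisks(lu, instance, disks=instance.disks[:1])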
6485 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
6486 """Checks if a node has enough free memory.
6488 This function checks if a given node has the needed amount of free
6489 memory. In case the node has less memory or we cannot get the
6490 information from the node, this function raises an OpPrereqError
6491 exception.
6493 @type lu: C{LogicalUnit}
6494 @param lu: a logical unit from which we get configuration data
6496 @param node: the node to check
6497 @type reason: C{str}
6498 @param reason: string to use in the error message
6499 @type requested: C{int}
6500 @param requested: the amount of memory in MiB to check for
6501 @type hypervisor_name: C{str}
6502 @param hypervisor_name: the hypervisor to ask for memory stats
6504 @return: node current free memory
6505 @raise errors.OpPrereqError: if the node doesn't have enough memory, or
6506 we cannot check the node
6509 nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
6510 nodeinfo[node].Raise("Can't get data from node %s" % node,
6511 prereq=True, ecode=errors.ECODE_ENVIRON)
6512 (_, _, (hv_info, )) = nodeinfo[node].payload
6514 free_mem = hv_info.get("memory_free", None)
6515 if not isinstance(free_mem, int):
6516 raise errors.OpPrereqError("Can't compute free memory on node %s, result"
6517 " was '%s'" % (node, free_mem),
6518 errors.ECODE_ENVIRON)
6519 if requested > free_mem:
6520 raise errors.OpPrereqError("Not enough memory on node %s for %s:"
6521 " needed %s MiB, available %s MiB" %
6522 (node, reason, requested, free_mem),
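# Illustrative sketch, unused by the module: the usual call pattern for the
# memory check above, mirroring LUInstanceStartup.CheckPrereq. The helper name
# is hypothetical.
def _ExampleCheckStartupMemory(lu, instance, mem_mib):
  """Verify the primary node has mem_mib MiB free before a start."""
  return _CheckNodeFreeMemory(lu, instance.primary_node,
                              "starting instance %s" % instance.name,
                              mem_mib, instance.hypervisor)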
6527 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
6528 """Checks if nodes have enough free disk space in the all VGs.
6530 This function check if all given nodes have the needed amount of
6531 free disk. In case any node has less disk or we cannot get the
6532 information from the node, this function raise an OpPrereqError
6535 @type lu: C{LogicalUnit}
6536 @param lu: a logical unit from which we get configuration data
6537 @type nodenames: C{list}
6538 @param nodenames: the list of node names to check
6539 @type req_sizes: C{dict}
6540 @param req_sizes: the hash of vg and corresponding amount of disk in
6542 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6543 or we cannot check the node
6546 for vg, req_size in req_sizes.items():
6547 _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
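# Illustrative sketch, unused by the module: req_sizes maps a volume group
# name to the disk space (in MiB) required on every node. The VG name "xenvg"
# and the helper name are assumptions for the example.
def _ExampleRequireTenGiB(lu, nodenames):
  """Require 10 GiB of free space in volume group "xenvg" on all nodes."""
  _CheckNodesFreeDiskPerVG(lu, nodenames, {"xenvg": 10 * 1024})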
6550 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
6551 """Checks if nodes have enough free disk space in the specified VG.
6553 This function checks if all given nodes have the needed amount of
6554 free disk. In case any node has less disk or we cannot get the
6555 information from the node, this function raises an OpPrereqError
6556 exception.
6558 @type lu: C{LogicalUnit}
6559 @param lu: a logical unit from which we get configuration data
6560 @type nodenames: C{list}
6561 @param nodenames: the list of node names to check
6563 @param vg: the volume group to check
6564 @type requested: C{int}
6565 @param requested: the amount of disk in MiB to check for
6566 @raise errors.OpPrereqError: if the node doesn't have enough disk,
6567 or we cannot check the node
6570 nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
6571 for node in nodenames:
6572 info = nodeinfo[node]
6573 info.Raise("Cannot get current information from node %s" % node,
6574 prereq=True, ecode=errors.ECODE_ENVIRON)
6575 (_, (vg_info, ), _) = info.payload
6576 vg_free = vg_info.get("vg_free", None)
6577 if not isinstance(vg_free, int):
6578 raise errors.OpPrereqError("Can't compute free disk space on node"
6579 " %s for vg %s, result was '%s'" %
6580 (node, vg, vg_free), errors.ECODE_ENVIRON)
6581 if requested > vg_free:
6582 raise errors.OpPrereqError("Not enough disk space on target node %s"
6583 " vg %s: required %d MiB, available %d MiB" %
6584 (node, vg, requested, vg_free),
6588 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
6589 """Checks if nodes have enough physical CPUs
6591 This function checks if all given nodes have the needed number of
6592 physical CPUs. In case any node has fewer CPUs or we cannot get the
6593 information from the node, this function raises an OpPrereqError
6594 exception.
6596 @type lu: C{LogicalUnit}
6597 @param lu: a logical unit from which we get configuration data
6598 @type nodenames: C{list}
6599 @param nodenames: the list of node names to check
6600 @type requested: C{int}
6601 @param requested: the minimum acceptable number of physical CPUs
6602 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
6603 or we cannot check the node
6606 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
6607 for node in nodenames:
6608 info = nodeinfo[node]
6609 info.Raise("Cannot get current information from node %s" % node,
6610 prereq=True, ecode=errors.ECODE_ENVIRON)
6611 (_, _, (hv_info, )) = info.payload
6612 num_cpus = hv_info.get("cpu_total", None)
6613 if not isinstance(num_cpus, int):
6614 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
6615 " on node %s, result was '%s'" %
6616 (node, num_cpus), errors.ECODE_ENVIRON)
6617 if requested > num_cpus:
6618 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
6619 "required" % (node, num_cpus, requested),
6623 class LUInstanceStartup(LogicalUnit):
6624 """Starts an instance.
6627 HPATH = "instance-start"
6628 HTYPE = constants.HTYPE_INSTANCE
6631 def CheckArguments(self):
6633 if self.op.beparams:
6634 # fill the beparams dict
6635 objects.UpgradeBeParams(self.op.beparams)
6636 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6638 def ExpandNames(self):
6639 self._ExpandAndLockInstance()
6640 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
6642 def DeclareLocks(self, level):
6643 if level == locking.LEVEL_NODE_RES:
6644 self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
6646 def BuildHooksEnv(self):
6649 This runs on master, primary and secondary nodes of the instance.
6653 "FORCE": self.op.force,
6656 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6660 def BuildHooksNodes(self):
6661 """Build hooks nodes.
6664 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6667 def CheckPrereq(self):
6668 """Check prerequisites.
6670 This checks that the instance is in the cluster.
6673 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6674 assert self.instance is not None, \
6675 "Cannot retrieve locked instance %s" % self.op.instance_name
6678 if self.op.hvparams:
6679 # check hypervisor parameter syntax (locally)
6680 cluster = self.cfg.GetClusterInfo()
6681 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6682 filled_hvp = cluster.FillHV(instance)
6683 filled_hvp.update(self.op.hvparams)
6684 hv_type = hypervisor.GetHypervisor(instance.hypervisor)
6685 hv_type.CheckParameterSyntax(filled_hvp)
6686 _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
6688 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6690 self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
6692 if self.primary_offline and self.op.ignore_offline_nodes:
6693 self.proc.LogWarning("Ignoring offline primary node")
6695 if self.op.hvparams or self.op.beparams:
6696 self.proc.LogWarning("Overridden parameters are ignored")
6698 _CheckNodeOnline(self, instance.primary_node)
6700 bep = self.cfg.GetClusterInfo().FillBE(instance)
6701 bep.update(self.op.beparams)
6703 # check bridges existence
6704 _CheckInstanceBridgesExist(self, instance)
6706 remote_info = self.rpc.call_instance_info(instance.primary_node,
6708 instance.hypervisor)
6709 remote_info.Raise("Error checking node %s" % instance.primary_node,
6710 prereq=True, ecode=errors.ECODE_ENVIRON)
6711 if not remote_info.payload: # not running already
6712 _CheckNodeFreeMemory(self, instance.primary_node,
6713 "starting instance %s" % instance.name,
6714 bep[constants.BE_MINMEM], instance.hypervisor)
6716 def Exec(self, feedback_fn):
6717 """Start the instance.
6720 instance = self.instance
6721 force = self.op.force
6723 if not self.op.no_remember:
6724 self.cfg.MarkInstanceUp(instance.name)
6726 if self.primary_offline:
6727 assert self.op.ignore_offline_nodes
6728 self.proc.LogInfo("Primary node offline, marked instance as started")
6729 else:
6730 node_current = instance.primary_node
6732 _StartInstanceDisks(self, instance, force)
6734 result = \
6735 self.rpc.call_instance_start(node_current,
6736 (instance, self.op.hvparams,
6738 self.op.startup_paused)
6739 msg = result.fail_msg
6740 if msg:
6741 _ShutdownInstanceDisks(self, instance)
6742 raise errors.OpExecError("Could not start instance: %s" % msg)
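# Illustrative sketch, unused by the module: a client-side opcode driving the
# LU above with a one-off memory override. It assumes OpInstanceStartup
# exposes the instance_name, beparams and force fields read by
# LUInstanceStartup; the memory value is arbitrary.
def _ExampleStartWithMemoryOverride(instance_name):
  """Build a startup opcode that temporarily raises the maximum memory."""
  return opcodes.OpInstanceStartup(instance_name=instance_name,
                                   beparams={constants.BE_MAXMEM: 4096},
                                   force=False)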
6745 class LUInstanceReboot(LogicalUnit):
6746 """Reboot an instance.
6749 HPATH = "instance-reboot"
6750 HTYPE = constants.HTYPE_INSTANCE
6753 def ExpandNames(self):
6754 self._ExpandAndLockInstance()
6756 def BuildHooksEnv(self):
6759 This runs on master, primary and secondary nodes of the instance.
6763 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
6764 "REBOOT_TYPE": self.op.reboot_type,
6765 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6768 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6772 def BuildHooksNodes(self):
6773 """Build hooks nodes.
6776 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6779 def CheckPrereq(self):
6780 """Check prerequisites.
6782 This checks that the instance is in the cluster.
6785 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6786 assert self.instance is not None, \
6787 "Cannot retrieve locked instance %s" % self.op.instance_name
6788 _CheckInstanceState(self, instance, INSTANCE_ONLINE)
6789 _CheckNodeOnline(self, instance.primary_node)
6791 # check bridges existence
6792 _CheckInstanceBridgesExist(self, instance)
6794 def Exec(self, feedback_fn):
6795 """Reboot the instance.
6798 instance = self.instance
6799 ignore_secondaries = self.op.ignore_secondaries
6800 reboot_type = self.op.reboot_type
6802 remote_info = self.rpc.call_instance_info(instance.primary_node,
6804 instance.hypervisor)
6805 remote_info.Raise("Error checking node %s" % instance.primary_node)
6806 instance_running = bool(remote_info.payload)
6808 node_current = instance.primary_node
6810 if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
6811 constants.INSTANCE_REBOOT_HARD]:
6812 for disk in instance.disks:
6813 self.cfg.SetDiskID(disk, node_current)
6814 result = self.rpc.call_instance_reboot(node_current, instance,
6816 self.op.shutdown_timeout)
6817 result.Raise("Could not reboot instance")
6818 else:
6819 if instance_running:
6820 result = self.rpc.call_instance_shutdown(node_current, instance,
6821 self.op.shutdown_timeout)
6822 result.Raise("Could not shutdown instance for full reboot")
6823 _ShutdownInstanceDisks(self, instance)
6824 else:
6825 self.LogInfo("Instance %s was already stopped, starting now",
6826 instance.name)
6827 _StartInstanceDisks(self, instance, ignore_secondaries)
6828 result = self.rpc.call_instance_start(node_current,
6829 (instance, None, None), False)
6830 msg = result.fail_msg
6831 if msg:
6832 _ShutdownInstanceDisks(self, instance)
6833 raise errors.OpExecError("Could not start instance for"
6834 " full reboot: %s" % msg)
6836 self.cfg.MarkInstanceUp(instance.name)
6839 class LUInstanceShutdown(LogicalUnit):
6840 """Shutdown an instance.
6843 HPATH = "instance-stop"
6844 HTYPE = constants.HTYPE_INSTANCE
6847 def ExpandNames(self):
6848 self._ExpandAndLockInstance()
6850 def BuildHooksEnv(self):
6853 This runs on master, primary and secondary nodes of the instance.
6856 env = _BuildInstanceHookEnvByObject(self, self.instance)
6857 env["TIMEOUT"] = self.op.timeout
6860 def BuildHooksNodes(self):
6861 """Build hooks nodes.
6864 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6867 def CheckPrereq(self):
6868 """Check prerequisites.
6870 This checks that the instance is in the cluster.
6873 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6874 assert self.instance is not None, \
6875 "Cannot retrieve locked instance %s" % self.op.instance_name
6877 _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
6879 self.primary_offline = \
6880 self.cfg.GetNodeInfo(self.instance.primary_node).offline
6882 if self.primary_offline and self.op.ignore_offline_nodes:
6883 self.proc.LogWarning("Ignoring offline primary node")
6885 _CheckNodeOnline(self, self.instance.primary_node)
6887 def Exec(self, feedback_fn):
6888 """Shutdown the instance.
6891 instance = self.instance
6892 node_current = instance.primary_node
6893 timeout = self.op.timeout
6895 if not self.op.no_remember:
6896 self.cfg.MarkInstanceDown(instance.name)
6898 if self.primary_offline:
6899 assert self.op.ignore_offline_nodes
6900 self.proc.LogInfo("Primary node offline, marked instance as stopped")
6902 result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
6903 msg = result.fail_msg
6904 if msg:
6905 self.proc.LogWarning("Could not shutdown instance: %s" % msg)
6907 _ShutdownInstanceDisks(self, instance)
6910 class LUInstanceReinstall(LogicalUnit):
6911 """Reinstall an instance.
6914 HPATH = "instance-reinstall"
6915 HTYPE = constants.HTYPE_INSTANCE
6918 def ExpandNames(self):
6919 self._ExpandAndLockInstance()
6921 def BuildHooksEnv(self):
6924 This runs on master, primary and secondary nodes of the instance.
6927 return _BuildInstanceHookEnvByObject(self, self.instance)
6929 def BuildHooksNodes(self):
6930 """Build hooks nodes.
6933 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6936 def CheckPrereq(self):
6937 """Check prerequisites.
6939 This checks that the instance is in the cluster and is not running.
6942 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6943 assert instance is not None, \
6944 "Cannot retrieve locked instance %s" % self.op.instance_name
6945 _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
6946 " offline, cannot reinstall")
6947 for node in instance.secondary_nodes:
6948 _CheckNodeOnline(self, node, "Instance secondary node offline,"
6949 " cannot reinstall")
6951 if instance.disk_template == constants.DT_DISKLESS:
6952 raise errors.OpPrereqError("Instance '%s' has no disks" %
6953 self.op.instance_name,
6955 _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
6957 if self.op.os_type is not None:
6959 pnode = _ExpandNodeName(self.cfg, instance.primary_node)
6960 _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
6961 instance_os = self.op.os_type
6963 instance_os = instance.os
6965 nodelist = list(instance.all_nodes)
6967 if self.op.osparams:
6968 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
6969 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
6970 self.os_inst = i_osdict # the new dict (without defaults)
6971 else:
6972 self.os_inst = {}
6974 self.instance = instance
6976 def Exec(self, feedback_fn):
6977 """Reinstall the instance.
6980 inst = self.instance
6982 if self.op.os_type is not None:
6983 feedback_fn("Changing OS to '%s'..." % self.op.os_type)
6984 inst.os = self.op.os_type
6985 # Write to configuration
6986 self.cfg.Update(inst, feedback_fn)
6988 _StartInstanceDisks(self, inst, None)
6990 feedback_fn("Running the instance OS create scripts...")
6991 # FIXME: pass debug option from opcode to backend
6992 result = self.rpc.call_instance_os_add(inst.primary_node,
6993 (inst, self.os_inst), True,
6994 self.op.debug_level)
6995 result.Raise("Could not install OS for instance %s on node %s" %
6996 (inst.name, inst.primary_node))
6998 _ShutdownInstanceDisks(self, inst)
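# Illustrative sketch, unused by the module: a reinstall request changing the
# OS, using the fields read by LUInstanceReinstall above. The OS name is an
# assumption, not something shipped with Ganeti.
def _ExampleReinstallOpcode(instance_name):
  """Build an opcode reinstalling an instance with a different OS."""
  return opcodes.OpInstanceReinstall(instance_name=instance_name,
                                     os_type="debootstrap+default",
                                     osparams={})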
7001 class LUInstanceRecreateDisks(LogicalUnit):
7002 """Recreate an instance's missing disks.
7005 HPATH = "instance-recreate-disks"
7006 HTYPE = constants.HTYPE_INSTANCE
7009 _MODIFYABLE = frozenset([
7010 constants.IDISK_SIZE,
7011 constants.IDISK_MODE,
7014 # New or changed disk parameters may have different semantics
7015 assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
7016 constants.IDISK_ADOPT,
7018 # TODO: Implement support changing VG while recreating
7020 constants.IDISK_METAVG,
7023 def CheckArguments(self):
7024 if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
7025 # Normalize and convert deprecated list of disk indices
7026 self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
7028 duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
7030 raise errors.OpPrereqError("Some disks have been specified more than"
7031 " once: %s" % utils.CommaJoin(duplicates),
7034 for (idx, params) in self.op.disks:
7035 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
7036 unsupported = frozenset(params.keys()) - self._MODIFYABLE
7038 raise errors.OpPrereqError("Parameters for disk %s try to change"
7039 " unmodifyable parameter(s): %s" %
7040 (idx, utils.CommaJoin(unsupported)),
7043 def ExpandNames(self):
7044 self._ExpandAndLockInstance()
7045 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7047 self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
7048 self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
7050 self.needed_locks[locking.LEVEL_NODE] = []
7051 self.needed_locks[locking.LEVEL_NODE_RES] = []
7053 def DeclareLocks(self, level):
7054 if level == locking.LEVEL_NODE:
7055 # if we replace the nodes, we only need to lock the old primary,
7056 # otherwise we need to lock all nodes for disk re-creation
7057 primary_only = bool(self.op.nodes)
7058 self._LockInstancesNodes(primary_only=primary_only)
7059 elif level == locking.LEVEL_NODE_RES:
7061 self.needed_locks[locking.LEVEL_NODE_RES] = \
7062 self.needed_locks[locking.LEVEL_NODE][:]
7064 def BuildHooksEnv(self):
7067 This runs on master, primary and secondary nodes of the instance.
7070 return _BuildInstanceHookEnvByObject(self, self.instance)
7072 def BuildHooksNodes(self):
7073 """Build hooks nodes.
7076 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7079 def CheckPrereq(self):
7080 """Check prerequisites.
7082 This checks that the instance is in the cluster and is not running.
7085 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7086 assert instance is not None, \
7087 "Cannot retrieve locked instance %s" % self.op.instance_name
7088 if self.op.nodes:
7089 if len(self.op.nodes) != len(instance.all_nodes):
7090 raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
7091 " %d replacement nodes were specified" %
7092 (instance.name, len(instance.all_nodes),
7093 len(self.op.nodes)),
7095 assert instance.disk_template != constants.DT_DRBD8 or \
7096 len(self.op.nodes) == 2
7097 assert instance.disk_template != constants.DT_PLAIN or \
7098 len(self.op.nodes) == 1
7099 primary_node = self.op.nodes[0]
7100 else:
7101 primary_node = instance.primary_node
7102 _CheckNodeOnline(self, primary_node)
7104 if instance.disk_template == constants.DT_DISKLESS:
7105 raise errors.OpPrereqError("Instance '%s' has no disks" %
7106 self.op.instance_name, errors.ECODE_INVAL)
7108 # if we replace nodes *and* the old primary is offline, we don't
7110 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
7111 assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE_RES)
7112 old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
7113 if not (self.op.nodes and old_pnode.offline):
7114 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7115 msg="cannot recreate disks")
7117 if self.op.disks:
7118 self.disks = dict(self.op.disks)
7119 else:
7120 self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
7122 maxidx = max(self.disks.keys())
7123 if maxidx >= len(instance.disks):
7124 raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
7127 if (self.op.nodes and
7128 sorted(self.disks.keys()) != range(len(instance.disks))):
7129 raise errors.OpPrereqError("Can't recreate disks partially and"
7130 " change the nodes at the same time",
7133 self.instance = instance
7135 def Exec(self, feedback_fn):
7136 """Recreate the disks.
7139 instance = self.instance
7141 assert (self.owned_locks(locking.LEVEL_NODE) ==
7142 self.owned_locks(locking.LEVEL_NODE_RES))
7144 to_skip = []
7145 mods = [] # keeps track of needed changes
7147 for idx, disk in enumerate(instance.disks):
7149 changes = self.disks[idx]
7151 # Disk should not be recreated
7155 # update secondaries for disks, if needed
7156 if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
7157 # need to update the nodes and minors
7158 assert len(self.op.nodes) == 2
7159 assert len(disk.logical_id) == 6 # otherwise disk internals
7161 (_, _, old_port, _, _, old_secret) = disk.logical_id
7162 new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
7163 new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
7164 new_minors[0], new_minors[1], old_secret)
7165 assert len(disk.logical_id) == len(new_id)
7169 mods.append((idx, new_id, changes))
7171 # now that we have passed all asserts above, we can apply the mods
7172 # in a single run (to avoid partial changes)
7173 for idx, new_id, changes in mods:
7174 disk = instance.disks[idx]
7175 if new_id is not None:
7176 assert disk.dev_type == constants.LD_DRBD8
7177 disk.logical_id = new_id
7178 if changes:
7179 disk.Update(size=changes.get(constants.IDISK_SIZE, None),
7180 mode=changes.get(constants.IDISK_MODE, None))
7182 # change primary node, if needed
7183 if self.op.nodes:
7184 instance.primary_node = self.op.nodes[0]
7185 self.LogWarning("Changing the instance's nodes, you will have to"
7186 " remove any disks left on the older nodes manually")
7189 self.cfg.Update(instance, feedback_fn)
7191 _CreateDisks(self, instance, to_skip=to_skip)
7194 class LUInstanceRename(LogicalUnit):
7195 """Rename an instance.
7198 HPATH = "instance-rename"
7199 HTYPE = constants.HTYPE_INSTANCE
7201 def CheckArguments(self):
7205 if self.op.ip_check and not self.op.name_check:
7206 # TODO: make the ip check more flexible and not depend on the name check
7207 raise errors.OpPrereqError("IP address check requires a name check",
7210 def BuildHooksEnv(self):
7213 This runs on master, primary and secondary nodes of the instance.
7216 env = _BuildInstanceHookEnvByObject(self, self.instance)
7217 env["INSTANCE_NEW_NAME"] = self.op.new_name
7220 def BuildHooksNodes(self):
7221 """Build hooks nodes.
7224 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7227 def CheckPrereq(self):
7228 """Check prerequisites.
7230 This checks that the instance is in the cluster and is not running.
7233 self.op.instance_name = _ExpandInstanceName(self.cfg,
7234 self.op.instance_name)
7235 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7236 assert instance is not None
7237 _CheckNodeOnline(self, instance.primary_node)
7238 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
7239 msg="cannot rename")
7240 self.instance = instance
7242 new_name = self.op.new_name
7243 if self.op.name_check:
7244 hostname = netutils.GetHostname(name=new_name)
7245 if hostname.name != new_name:
7246 self.LogInfo("Resolved given name '%s' to '%s'", new_name,
7248 if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
7249 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
7250 " same as given hostname '%s'") %
7251 (hostname.name, self.op.new_name),
7253 new_name = self.op.new_name = hostname.name
7254 if (self.op.ip_check and
7255 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
7256 raise errors.OpPrereqError("IP %s of instance %s already in use" %
7257 (hostname.ip, new_name),
7258 errors.ECODE_NOTUNIQUE)
7260 instance_list = self.cfg.GetInstanceList()
7261 if new_name in instance_list and new_name != instance.name:
7262 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7263 new_name, errors.ECODE_EXISTS)
7265 def Exec(self, feedback_fn):
7266 """Rename the instance.
7269 inst = self.instance
7270 old_name = inst.name
7272 rename_file_storage = False
7273 if (inst.disk_template in constants.DTS_FILEBASED and
7274 self.op.new_name != inst.name):
7275 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7276 rename_file_storage = True
7278 self.cfg.RenameInstance(inst.name, self.op.new_name)
7279 # Change the instance lock. This is definitely safe while we hold the BGL.
7280 # Otherwise the new lock would have to be added in acquired mode.
7282 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
7283 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
7285 # re-read the instance from the configuration after rename
7286 inst = self.cfg.GetInstanceInfo(self.op.new_name)
7288 if rename_file_storage:
7289 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
7290 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
7291 old_file_storage_dir,
7292 new_file_storage_dir)
7293 result.Raise("Could not rename on node %s directory '%s' to '%s'"
7294 " (but the instance has been renamed in Ganeti)" %
7295 (inst.primary_node, old_file_storage_dir,
7296 new_file_storage_dir))
7298 _StartInstanceDisks(self, inst, None)
7300 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
7301 old_name, self.op.debug_level)
7302 msg = result.fail_msg
7303 if msg:
7304 msg = ("Could not run OS rename script for instance %s on node %s"
7305 " (but the instance has been renamed in Ganeti): %s" %
7306 (inst.name, inst.primary_node, msg))
7307 self.proc.LogWarning(msg)
7309 _ShutdownInstanceDisks(self, inst)
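# Illustrative sketch, unused by the module: a rename request exercising the
# name and IP checks evaluated by LUInstanceRename.CheckPrereq above. The
# instance names are made up.
def _ExampleRenameOpcode():
  """Build an opcode renaming an instance, with name resolution and IP check."""
  return opcodes.OpInstanceRename(instance_name="web1.example.com",
                                  new_name="web2.example.com",
                                  name_check=True, ip_check=True)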
7314 class LUInstanceRemove(LogicalUnit):
7315 """Remove an instance.
7318 HPATH = "instance-remove"
7319 HTYPE = constants.HTYPE_INSTANCE
7322 def ExpandNames(self):
7323 self._ExpandAndLockInstance()
7324 self.needed_locks[locking.LEVEL_NODE] = []
7325 self.needed_locks[locking.LEVEL_NODE_RES] = []
7326 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7328 def DeclareLocks(self, level):
7329 if level == locking.LEVEL_NODE:
7330 self._LockInstancesNodes()
7331 elif level == locking.LEVEL_NODE_RES:
7333 self.needed_locks[locking.LEVEL_NODE_RES] = \
7334 self.needed_locks[locking.LEVEL_NODE][:]
7336 def BuildHooksEnv(self):
7339 This runs on master, primary and secondary nodes of the instance.
7342 env = _BuildInstanceHookEnvByObject(self, self.instance)
7343 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
7346 def BuildHooksNodes(self):
7347 """Build hooks nodes.
7350 nl = [self.cfg.GetMasterNode()]
7351 nl_post = list(self.instance.all_nodes) + nl
7352 return (nl, nl_post)
7354 def CheckPrereq(self):
7355 """Check prerequisites.
7357 This checks that the instance is in the cluster.
7360 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7361 assert self.instance is not None, \
7362 "Cannot retrieve locked instance %s" % self.op.instance_name
7364 def Exec(self, feedback_fn):
7365 """Remove the instance.
7368 instance = self.instance
7369 logging.info("Shutting down instance %s on node %s",
7370 instance.name, instance.primary_node)
7372 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
7373 self.op.shutdown_timeout)
7374 msg = result.fail_msg
7375 if msg:
7376 if self.op.ignore_failures:
7377 feedback_fn("Warning: can't shutdown instance: %s" % msg)
7379 raise errors.OpExecError("Could not shutdown instance %s on"
7381 (instance.name, instance.primary_node, msg))
7383 assert (self.owned_locks(locking.LEVEL_NODE) ==
7384 self.owned_locks(locking.LEVEL_NODE_RES))
7385 assert not (set(instance.all_nodes) -
7386 self.owned_locks(locking.LEVEL_NODE)), \
7387 "Not owning correct locks"
7389 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
7392 def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
7393 """Utility function to remove an instance.
7396 logging.info("Removing block devices for instance %s", instance.name)
7398 if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
7399 if not ignore_failures:
7400 raise errors.OpExecError("Can't remove instance's disks")
7401 feedback_fn("Warning: can't remove instance's disks")
7403 logging.info("Removing instance %s out of cluster config", instance.name)
7405 lu.cfg.RemoveInstance(instance.name)
7407 assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
7408 "Instance lock removal conflict"
7410 # Remove lock for the instance
7411 lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
7414 class LUInstanceQuery(NoHooksLU):
7415 """Logical unit for querying instances.
7418 # pylint: disable=W0142
7421 def CheckArguments(self):
7422 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
7423 self.op.output_fields, self.op.use_locking)
7425 def ExpandNames(self):
7426 self.iq.ExpandNames(self)
7428 def DeclareLocks(self, level):
7429 self.iq.DeclareLocks(self, level)
7431 def Exec(self, feedback_fn):
7432 return self.iq.OldStyleQuery(self)
7435 class LUInstanceFailover(LogicalUnit):
7436 """Failover an instance.
7439 HPATH = "instance-failover"
7440 HTYPE = constants.HTYPE_INSTANCE
7443 def CheckArguments(self):
7444 """Check the arguments.
7447 self.iallocator = getattr(self.op, "iallocator", None)
7448 self.target_node = getattr(self.op, "target_node", None)
7450 def ExpandNames(self):
7451 self._ExpandAndLockInstance()
7453 if self.op.target_node is not None:
7454 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7456 self.needed_locks[locking.LEVEL_NODE] = []
7457 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7459 self.needed_locks[locking.LEVEL_NODE_RES] = []
7460 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
7462 ignore_consistency = self.op.ignore_consistency
7463 shutdown_timeout = self.op.shutdown_timeout
7464 self._migrater = TLMigrateInstance(self, self.op.instance_name,
7467 ignore_consistency=ignore_consistency,
7468 shutdown_timeout=shutdown_timeout,
7469 ignore_ipolicy=self.op.ignore_ipolicy)
7470 self.tasklets = [self._migrater]
7472 def DeclareLocks(self, level):
7473 if level == locking.LEVEL_NODE:
7474 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7475 if instance.disk_template in constants.DTS_EXT_MIRROR:
7476 if self.op.target_node is None:
7477 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7479 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7480 self.op.target_node]
7481 del self.recalculate_locks[locking.LEVEL_NODE]
7483 self._LockInstancesNodes()
7484 elif level == locking.LEVEL_NODE_RES:
7486 self.needed_locks[locking.LEVEL_NODE_RES] = \
7487 self.needed_locks[locking.LEVEL_NODE][:]
7489 def BuildHooksEnv(self):
7492 This runs on master, primary and secondary nodes of the instance.
7495 instance = self._migrater.instance
7496 source_node = instance.primary_node
7497 target_node = self.op.target_node
7499 "IGNORE_CONSISTENCY": self.op.ignore_consistency,
7500 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7501 "OLD_PRIMARY": source_node,
7502 "NEW_PRIMARY": target_node,
7505 if instance.disk_template in constants.DTS_INT_MIRROR:
7506 env["OLD_SECONDARY"] = instance.secondary_nodes[0]
7507 env["NEW_SECONDARY"] = source_node
7509 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
7511 env.update(_BuildInstanceHookEnvByObject(self, instance))
7515 def BuildHooksNodes(self):
7516 """Build hooks nodes.
7519 instance = self._migrater.instance
7520 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7521 return (nl, nl + [instance.primary_node])
7524 class LUInstanceMigrate(LogicalUnit):
7525 """Migrate an instance.
7527 This is migration without shutting down, compared to the failover,
7528 which is done with shutdown.
7531 HPATH = "instance-migrate"
7532 HTYPE = constants.HTYPE_INSTANCE
7535 def ExpandNames(self):
7536 self._ExpandAndLockInstance()
7538 if self.op.target_node is not None:
7539 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7541 self.needed_locks[locking.LEVEL_NODE] = []
7542 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7544 self.needed_locks[locking.LEVEL_NODE] = []
7545 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7548 TLMigrateInstance(self, self.op.instance_name,
7549 cleanup=self.op.cleanup,
7551 fallback=self.op.allow_failover,
7552 allow_runtime_changes=self.op.allow_runtime_changes,
7553 ignore_ipolicy=self.op.ignore_ipolicy)
7554 self.tasklets = [self._migrater]
7556 def DeclareLocks(self, level):
7557 if level == locking.LEVEL_NODE:
7558 instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
7559 if instance.disk_template in constants.DTS_EXT_MIRROR:
7560 if self.op.target_node is None:
7561 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7563 self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
7564 self.op.target_node]
7565 del self.recalculate_locks[locking.LEVEL_NODE]
7567 self._LockInstancesNodes()
7568 elif level == locking.LEVEL_NODE_RES:
7570 self.needed_locks[locking.LEVEL_NODE_RES] = \
7571 self.needed_locks[locking.LEVEL_NODE][:]
7573 def BuildHooksEnv(self):
7576 This runs on master, primary and secondary nodes of the instance.
7579 instance = self._migrater.instance
7580 source_node = instance.primary_node
7581 target_node = self.op.target_node
7582 env = _BuildInstanceHookEnvByObject(self, instance)
7584 "MIGRATE_LIVE": self._migrater.live,
7585 "MIGRATE_CLEANUP": self.op.cleanup,
7586 "OLD_PRIMARY": source_node,
7587 "NEW_PRIMARY": target_node,
7588 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7591 if instance.disk_template in constants.DTS_INT_MIRROR:
7592 env["OLD_SECONDARY"] = target_node
7593 env["NEW_SECONDARY"] = source_node
7595 env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
7599 def BuildHooksNodes(self):
7600 """Build hooks nodes.
7603 instance = self._migrater.instance
7604 nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
7605 return (nl, nl + [instance.primary_node])
7608 class LUInstanceMove(LogicalUnit):
7609 """Move an instance by data-copying.
7612 HPATH = "instance-move"
7613 HTYPE = constants.HTYPE_INSTANCE
7616 def ExpandNames(self):
7617 self._ExpandAndLockInstance()
7618 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
7619 self.op.target_node = target_node
7620 self.needed_locks[locking.LEVEL_NODE] = [target_node]
7621 self.needed_locks[locking.LEVEL_NODE_RES] = []
7622 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7624 def DeclareLocks(self, level):
7625 if level == locking.LEVEL_NODE:
7626 self._LockInstancesNodes(primary_only=True)
7627 elif level == locking.LEVEL_NODE_RES:
7629 self.needed_locks[locking.LEVEL_NODE_RES] = \
7630 self.needed_locks[locking.LEVEL_NODE][:]
7632 def BuildHooksEnv(self):
7635 This runs on master, primary and secondary nodes of the instance.
7639 "TARGET_NODE": self.op.target_node,
7640 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
7642 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7645 def BuildHooksNodes(self):
7646 """Build hooks nodes.
7650 self.cfg.GetMasterNode(),
7651 self.instance.primary_node,
7652 self.op.target_node,
7656 def CheckPrereq(self):
7657 """Check prerequisites.
7659 This checks that the instance is in the cluster.
7662 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7663 assert self.instance is not None, \
7664 "Cannot retrieve locked instance %s" % self.op.instance_name
7666 node = self.cfg.GetNodeInfo(self.op.target_node)
7667 assert node is not None, \
7668 "Cannot retrieve locked node %s" % self.op.target_node
7670 self.target_node = target_node = node.name
7672 if target_node == instance.primary_node:
7673 raise errors.OpPrereqError("Instance %s is already on the node %s" %
7674 (instance.name, target_node),
7677 bep = self.cfg.GetClusterInfo().FillBE(instance)
7679 for idx, dsk in enumerate(instance.disks):
7680 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
7681 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
7682 " cannot copy" % idx, errors.ECODE_STATE)
7684 _CheckNodeOnline(self, target_node)
7685 _CheckNodeNotDrained(self, target_node)
7686 _CheckNodeVmCapable(self, target_node)
7687 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
7688 self.cfg.GetNodeGroup(node.group))
7689 _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
7690 ignore=self.op.ignore_ipolicy)
7692 if instance.admin_state == constants.ADMINST_UP:
7693 # check memory requirements on the target node
7694 _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
7695 instance.name, bep[constants.BE_MAXMEM],
7696 instance.hypervisor)
7698 self.LogInfo("Not checking memory on the secondary node as"
7699 " instance will not be started")
7701 # check bridge existence
7702 _CheckInstanceBridgesExist(self, instance, node=target_node)
7704 def Exec(self, feedback_fn):
7705 """Move an instance.
7707 The move is done by shutting it down on its present node, copying
7708 the data over (slow) and starting it on the new node.
7711 instance = self.instance
7713 source_node = instance.primary_node
7714 target_node = self.target_node
7716 self.LogInfo("Shutting down instance %s on source node %s",
7717 instance.name, source_node)
7719 assert (self.owned_locks(locking.LEVEL_NODE) ==
7720 self.owned_locks(locking.LEVEL_NODE_RES))
7722 result = self.rpc.call_instance_shutdown(source_node, instance,
7723 self.op.shutdown_timeout)
7724 msg = result.fail_msg
7726 if self.op.ignore_consistency:
7727 self.proc.LogWarning("Could not shutdown instance %s on node %s."
7728 " Proceeding anyway. Please make sure node"
7729 " %s is down. Error details: %s",
7730 instance.name, source_node, source_node, msg)
7732 raise errors.OpExecError("Could not shutdown instance %s on"
7734 (instance.name, source_node, msg))
7736 # create the target disks
7738 _CreateDisks(self, instance, target_node=target_node)
7739 except errors.OpExecError:
7740 self.LogWarning("Device creation failed, reverting...")
7742 _RemoveDisks(self, instance, target_node=target_node)
7744 self.cfg.ReleaseDRBDMinors(instance.name)
7747 cluster_name = self.cfg.GetClusterInfo().cluster_name
7750 # activate, get path, copy the data over
7751 for idx, disk in enumerate(instance.disks):
7752 self.LogInfo("Copying data for disk %d", idx)
7753 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
7754 instance.name, True, idx)
7756 self.LogWarning("Can't assemble newly created disk %d: %s",
7757 idx, result.fail_msg)
7758 errs.append(result.fail_msg)
7760 dev_path = result.payload
7761 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
7762 target_node, dev_path,
7765 self.LogWarning("Can't copy data over for disk %d: %s",
7766 idx, result.fail_msg)
7767 errs.append(result.fail_msg)
7771 self.LogWarning("Some disks failed to copy, aborting")
7773 _RemoveDisks(self, instance, target_node=target_node)
7775 self.cfg.ReleaseDRBDMinors(instance.name)
7776 raise errors.OpExecError("Errors during disk copy: %s" %
7779 instance.primary_node = target_node
7780 self.cfg.Update(instance, feedback_fn)
7782 self.LogInfo("Removing the disks on the original node")
7783 _RemoveDisks(self, instance, target_node=source_node)
7785 # Only start the instance if it's marked as up
7786 if instance.admin_state == constants.ADMINST_UP:
7787 self.LogInfo("Starting instance %s on node %s",
7788 instance.name, target_node)
7790 disks_ok, _ = _AssembleInstanceDisks(self, instance,
7791 ignore_secondaries=True)
7793 _ShutdownInstanceDisks(self, instance)
7794 raise errors.OpExecError("Can't activate the instance's disks")
7796 result = self.rpc.call_instance_start(target_node,
7797 (instance, None, None), False)
7798 msg = result.fail_msg
7800 _ShutdownInstanceDisks(self, instance)
7801 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7802 (instance.name, target_node, msg))
7805 class LUNodeMigrate(LogicalUnit):
7806 """Migrate all instances from a node.
7809 HPATH = "node-migrate"
7810 HTYPE = constants.HTYPE_NODE
7813 def CheckArguments(self):
7816 def ExpandNames(self):
7817 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7819 self.share_locks = _ShareAll()
7820 self.needed_locks = {
7821 locking.LEVEL_NODE: [self.op.node_name],
7824 def BuildHooksEnv(self):
7827 This runs on the master, the primary and all the secondaries.
7831 "NODE_NAME": self.op.node_name,
7832 "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
7835 def BuildHooksNodes(self):
7836 """Build hooks nodes.
7839 nl = [self.cfg.GetMasterNode()]
7842 def CheckPrereq(self):
7845 def Exec(self, feedback_fn):
7846 # Prepare jobs for migrating instances
7847 allow_runtime_changes = self.op.allow_runtime_changes
7849 [opcodes.OpInstanceMigrate(instance_name=inst.name,
7852 iallocator=self.op.iallocator,
7853 target_node=self.op.target_node,
7854 allow_runtime_changes=allow_runtime_changes,
7855 ignore_ipolicy=self.op.ignore_ipolicy)]
7856 for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
7859 # TODO: Run iallocator in this opcode and pass correct placement options to
7860 # OpInstanceMigrate. Since other jobs can modify the cluster between
7861 # running the iallocator and the actual migration, a good consistency model
7862 # will have to be found.
7864 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
7865 frozenset([self.op.node_name]))
7867 return ResultWithJobs(jobs)
7870 class TLMigrateInstance(Tasklet):
7871 """Tasklet class for instance migration.
7874 @ivar live: whether the migration will be done live or non-live;
7875 this variable is initialized only after CheckPrereq has run
7876 @type cleanup: boolean
7877 @ivar cleanup: Whether we clean up from a failed migration
7878 @type iallocator: string
7879 @ivar iallocator: The iallocator used to determine target_node
7880 @type target_node: string
7881 @ivar target_node: If given, the target_node to reallocate the instance to
7882 @type failover: boolean
7883 @ivar failover: Whether operation results in failover or migration
7884 @type fallback: boolean
7885 @ivar fallback: Whether fallback to failover is allowed if migration not
7887 @type ignore_consistency: boolean
7888 @ivar ignore_consistency: Whether we should ignore consistency between source
7890 @type shutdown_timeout: int
7891 @ivar shutdown_timeout: In case of failover, the timeout for the shutdown
7892 @type ignore_ipolicy: bool
7893 @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
7898 _MIGRATION_POLL_INTERVAL = 1 # seconds
7899 _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
7901 def __init__(self, lu, instance_name, cleanup=False,
7902 failover=False, fallback=False,
7903 ignore_consistency=False,
7904 allow_runtime_changes=True,
7905 shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
7906 ignore_ipolicy=False):
7907 """Initializes this class.
7910 Tasklet.__init__(self, lu)
7913 self.instance_name = instance_name
7914 self.cleanup = cleanup
7915 self.live = False # will be overridden later
7916 self.failover = failover
7917 self.fallback = fallback
7918 self.ignore_consistency = ignore_consistency
7919 self.shutdown_timeout = shutdown_timeout
7920 self.ignore_ipolicy = ignore_ipolicy
7921 self.allow_runtime_changes = allow_runtime_changes
7923 def CheckPrereq(self):
7924 """Check prerequisites.
7926 This checks that the instance is in the cluster.
7929 instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
7930 instance = self.cfg.GetInstanceInfo(instance_name)
7931 assert instance is not None
7932 self.instance = instance
7933 cluster = self.cfg.GetClusterInfo()
7935 if (not self.cleanup and
7936 not instance.admin_state == constants.ADMINST_UP and
7937 not self.failover and self.fallback):
7938 self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
7939 " switching to failover")
7940 self.failover = True
7942 if instance.disk_template not in constants.DTS_MIRRORED:
7947 raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
7948 " %s" % (instance.disk_template, text),
7951 if instance.disk_template in constants.DTS_EXT_MIRROR:
7952 _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
7954 if self.lu.op.iallocator:
7955 self._RunAllocator()
7957 # We set self.target_node as it is required by
7959 self.target_node = self.lu.op.target_node
7961 # Check that the target node is correct in terms of instance policy
7962 nodeinfo = self.cfg.GetNodeInfo(self.target_node)
7963 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
7964 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
7965 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
7966 ignore=self.ignore_ipolicy)
7968 # self.target_node is already populated, either directly or by the
7970 target_node = self.target_node
7971 if self.target_node == instance.primary_node:
7972 raise errors.OpPrereqError("Cannot migrate instance %s"
7973 " to its primary (%s)" %
7974 (instance.name, instance.primary_node))
7976 if len(self.lu.tasklets) == 1:
7977 # It is safe to release locks only when we're the only tasklet
7979 _ReleaseLocks(self.lu, locking.LEVEL_NODE,
7980 keep=[instance.primary_node, self.target_node])
7983 secondary_nodes = instance.secondary_nodes
7984 if not secondary_nodes:
7985 raise errors.ConfigurationError("No secondary node but using"
7986 " %s disk template" %
7987 instance.disk_template)
7988 target_node = secondary_nodes[0]
7989 if self.lu.op.iallocator or (self.lu.op.target_node and
7990 self.lu.op.target_node != target_node):
7992 text = "failed over"
7995 raise errors.OpPrereqError("Instances with disk template %s cannot"
7996 " be %s to arbitrary nodes"
7997 " (neither an iallocator nor a target"
7998 " node can be passed)" %
7999 (instance.disk_template, text),
8001 nodeinfo = self.cfg.GetNodeInfo(target_node)
8002 group_info = self.cfg.GetNodeGroup(nodeinfo.group)
8003 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
8004 _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
8005 ignore=self.ignore_ipolicy)
8007 i_be = cluster.FillBE(instance)
8009 # check memory requirements on the target node
8010 if (not self.cleanup and
8011 (not self.failover or instance.admin_state == constants.ADMINST_UP)):
8012 self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
8013 "migrating instance %s" %
8015 i_be[constants.BE_MINMEM],
8016 instance.hypervisor)
8018 self.lu.LogInfo("Not checking memory on the secondary node as"
8019 " instance will not be started")
8021 # check if failover must be forced instead of migration
8022 if (not self.cleanup and not self.failover and
8023 i_be[constants.BE_ALWAYS_FAILOVER]):
8025 self.lu.LogInfo("Instance configured to always failover; fallback"
8027 self.failover = True
8029 raise errors.OpPrereqError("This instance has been configured to"
8030 " always failover, please allow failover",
8033 # check bridge existence
8034 _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
8036 if not self.cleanup:
8037 _CheckNodeNotDrained(self.lu, target_node)
8038 if not self.failover:
8039 result = self.rpc.call_instance_migratable(instance.primary_node,
8041 if result.fail_msg and self.fallback:
8042 self.lu.LogInfo("Can't migrate, instance offline, fallback to"
8044 self.failover = True
8046 result.Raise("Can't migrate, please use failover",
8047 prereq=True, ecode=errors.ECODE_STATE)
8049 assert not (self.failover and self.cleanup)
8051 if not self.failover:
8052 if self.lu.op.live is not None and self.lu.op.mode is not None:
8053 raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
8054 " parameters are accepted",
8056 if self.lu.op.live is not None:
8058 self.lu.op.mode = constants.HT_MIGRATION_LIVE
8060 self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
8061 # reset the 'live' parameter to None so that repeated
8062 # invocations of CheckPrereq do not raise an exception
8063 self.lu.op.live = None
8064 elif self.lu.op.mode is None:
8065 # read the default value from the hypervisor
8066 i_hv = cluster.FillHV(self.instance, skip_globals=False)
8067 self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
8069 self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
8071 # Failover is never live
8074 if not (self.failover or self.cleanup):
8075 remote_info = self.rpc.call_instance_info(instance.primary_node,
8077 instance.hypervisor)
8078 remote_info.Raise("Error checking instance on node %s" %
8079 instance.primary_node)
8080 instance_running = bool(remote_info.payload)
8081 if instance_running:
8082 self.current_mem = int(remote_info.payload["memory"])
8084 def _RunAllocator(self):
8085 """Run the allocator based on input opcode.
8088 # FIXME: add a self.ignore_ipolicy option
8089 ial = IAllocator(self.cfg, self.rpc,
8090 mode=constants.IALLOCATOR_MODE_RELOC,
8091 name=self.instance_name,
8092 relocate_from=[self.instance.primary_node],
8095 ial.Run(self.lu.op.iallocator)
8098 raise errors.OpPrereqError("Can't compute nodes using"
8099 " iallocator '%s': %s" %
8100 (self.lu.op.iallocator, ial.info),
8102 if len(ial.result) != ial.required_nodes:
8103 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8104 " of nodes (%s), required %s" %
8105 (self.lu.op.iallocator, len(ial.result),
8106 ial.required_nodes), errors.ECODE_FAULT)
8107 self.target_node = ial.result[0]
8108 self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8109 self.instance_name, self.lu.op.iallocator,
8110 utils.CommaJoin(ial.result))
8112 def _WaitUntilSync(self):
8113 """Poll with custom rpc for disk sync.
8115 This uses our own step-based rpc call.
8118 self.feedback_fn("* wait until resync is done")
8122 result = self.rpc.call_drbd_wait_sync(self.all_nodes,
8124 (self.instance.disks,
8127 for node, nres in result.items():
8128 nres.Raise("Cannot resync disks on node %s" % node)
8129 node_done, node_percent = nres.payload
8130 all_done = all_done and node_done
8131 if node_percent is not None:
8132 min_percent = min(min_percent, node_percent)
8134 if min_percent < 100:
8135 self.feedback_fn(" - progress: %.1f%%" % min_percent)
8138 def _EnsureSecondary(self, node):
8139 """Demote a node to secondary.
8142 self.feedback_fn("* switching node %s to secondary mode" % node)
8144 for dev in self.instance.disks:
8145 self.cfg.SetDiskID(dev, node)
8147 result = self.rpc.call_blockdev_close(node, self.instance.name,
8148 self.instance.disks)
8149 result.Raise("Cannot change disk to secondary on node %s" % node)
8151 def _GoStandalone(self):
8152 """Disconnect from the network.
8155 self.feedback_fn("* changing into standalone mode")
8156 result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
8157 self.instance.disks)
8158 for node, nres in result.items():
8159 nres.Raise("Cannot disconnect disks on node %s" % node)
8161 def _GoReconnect(self, multimaster):
8162 """Reconnect to the network.
8168 msg = "single-master"
8169 self.feedback_fn("* changing disks into %s mode" % msg)
8170 result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
8171 (self.instance.disks, self.instance),
8172 self.instance.name, multimaster)
8173 for node, nres in result.items():
8174 nres.Raise("Cannot change disks config on node %s" % node)
8176 def _ExecCleanup(self):
8177 """Try to cleanup after a failed migration.
8179 The cleanup is done by:
8180 - check that the instance is running only on one node
8181 (and update the config if needed)
8182 - change disks on its secondary node to secondary
8183 - wait until disks are fully synchronized
8184 - disconnect from the network
8185 - change disks into single-master mode
8186 - wait again until disks are fully synchronized
8189 instance = self.instance
8190 target_node = self.target_node
8191 source_node = self.source_node
8193 # check running on only one node
8194 self.feedback_fn("* checking where the instance actually runs"
8195 " (if this hangs, the hypervisor might be in"
8197 ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
8198 for node, result in ins_l.items():
8199 result.Raise("Can't contact node %s" % node)
8201 runningon_source = instance.name in ins_l[source_node].payload
8202 runningon_target = instance.name in ins_l[target_node].payload
8204 if runningon_source and runningon_target:
8205 raise errors.OpExecError("Instance seems to be running on two nodes,"
8206 " or the hypervisor is confused; you will have"
8207 " to ensure manually that it runs only on one"
8208 " and restart this operation")
8210 if not (runningon_source or runningon_target):
8211 raise errors.OpExecError("Instance does not seem to be running at all;"
8212 " in this case it's safer to repair by"
8213 " running 'gnt-instance stop' to ensure disk"
8214 " shutdown, and then restarting it")
8216 if runningon_target:
8217 # the migration has actually succeeded, we need to update the config
8218 self.feedback_fn("* instance running on secondary node (%s),"
8219 " updating config" % target_node)
8220 instance.primary_node = target_node
8221 self.cfg.Update(instance, self.feedback_fn)
8222 demoted_node = source_node
8224 self.feedback_fn("* instance confirmed to be running on its"
8225 " primary node (%s)" % source_node)
8226 demoted_node = target_node
8228 if instance.disk_template in constants.DTS_INT_MIRROR:
8229 self._EnsureSecondary(demoted_node)
8231 self._WaitUntilSync()
8232 except errors.OpExecError:
8233 # we ignore errors here, since if the device is standalone, it
8234 # won't be able to sync
8236 self._GoStandalone()
8237 self._GoReconnect(False)
8238 self._WaitUntilSync()
8240 self.feedback_fn("* done")
8242 def _RevertDiskStatus(self):
8243 """Try to revert the disk status after a failed migration.
8246 target_node = self.target_node
8247 if self.instance.disk_template in constants.DTS_EXT_MIRROR:
8251 self._EnsureSecondary(target_node)
8252 self._GoStandalone()
8253 self._GoReconnect(False)
8254 self._WaitUntilSync()
8255 except errors.OpExecError, err:
8256 self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
8257 " please try to recover the instance manually;"
8258 " error '%s'" % str(err))
8260 def _AbortMigration(self):
8261 """Call the hypervisor code to abort a started migration.
8264 instance = self.instance
8265 target_node = self.target_node
8266 source_node = self.source_node
8267 migration_info = self.migration_info
8269 abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
8273 abort_msg = abort_result.fail_msg
8275 logging.error("Aborting migration failed on target node %s: %s",
8276 target_node, abort_msg)
8277 # Don't raise an exception here, as we still have to try to revert the
8278 # disk status, even if this step failed.
8280 abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
8281 instance, False, self.live)
8282 abort_msg = abort_result.fail_msg
8284 logging.error("Aborting migration failed on source node %s: %s",
8285 source_node, abort_msg)
8287 def _ExecMigration(self):
8288 """Migrate an instance.
8290 The migrate is done by:
8291 - change the disks into dual-master mode
8292 - wait until disks are fully synchronized again
8293 - migrate the instance
8294 - change disks on the new secondary node (the old primary) to secondary
8295 - wait until disks are fully synchronized
8296 - change disks into single-master mode
8299 instance = self.instance
8300 target_node = self.target_node
8301 source_node = self.source_node
8303 # Check for hypervisor version mismatch and warn the user.
8304 nodeinfo = self.rpc.call_node_info([source_node, target_node],
8305 None, [self.instance.hypervisor])
8306 for ninfo in nodeinfo.values():
8307 ninfo.Raise("Unable to retrieve node information from node '%s'" %
8309 (_, _, (src_info, )) = nodeinfo[source_node].payload
8310 (_, _, (dst_info, )) = nodeinfo[target_node].payload
8312 if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
8313 (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
8314 src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
8315 dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
8316 if src_version != dst_version:
8317 self.feedback_fn("* warning: hypervisor version mismatch between"
8318 " source (%s) and target (%s) node" %
8319 (src_version, dst_version))
8321 self.feedback_fn("* checking disk consistency between source and target")
8322 for (idx, dev) in enumerate(instance.disks):
8323 if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
8324 raise errors.OpExecError("Disk %s is degraded or not fully"
8325 " synchronized on target node,"
8326 " aborting migration" % idx)
8328 if self.current_mem > self.tgt_free_mem:
8329 if not self.allow_runtime_changes:
8330 raise errors.OpExecError("Memory ballooning not allowed and not enough"
8331 " free memory to fit instance %s on target"
8332 " node %s (have %dMB, need %dMB)" %
8333 (instance.name, target_node,
8334 self.tgt_free_mem, self.current_mem))
8335 self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
8336 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
8339 rpcres.Raise("Cannot modify instance runtime memory")
8341 # First get the migration information from the remote node
8342 result = self.rpc.call_migration_info(source_node, instance)
8343 msg = result.fail_msg
8345 log_err = ("Failed fetching source migration information from %s: %s" %
8347 logging.error(log_err)
8348 raise errors.OpExecError(log_err)
8350 self.migration_info = migration_info = result.payload
8352 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8353 # Then switch the disks to master/master mode
8354 self._EnsureSecondary(target_node)
8355 self._GoStandalone()
8356 self._GoReconnect(True)
8357 self._WaitUntilSync()
8359 self.feedback_fn("* preparing %s to accept the instance" % target_node)
8360 result = self.rpc.call_accept_instance(target_node,
8363 self.nodes_ip[target_node])
8365 msg = result.fail_msg
8367 logging.error("Instance pre-migration failed, trying to revert"
8368 " disk status: %s", msg)
8369 self.feedback_fn("Pre-migration failed, aborting")
8370 self._AbortMigration()
8371 self._RevertDiskStatus()
8372 raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
8373 (instance.name, msg))
8375 self.feedback_fn("* migrating instance to %s" % target_node)
8376 result = self.rpc.call_instance_migrate(source_node, instance,
8377 self.nodes_ip[target_node],
8379 msg = result.fail_msg
8381 logging.error("Instance migration failed, trying to revert"
8382 " disk status: %s", msg)
8383 self.feedback_fn("Migration failed, aborting")
8384 self._AbortMigration()
8385 self._RevertDiskStatus()
8386 raise errors.OpExecError("Could not migrate instance %s: %s" %
8387 (instance.name, msg))
8389 self.feedback_fn("* starting memory transfer")
8390 last_feedback = time.time()
8392 result = self.rpc.call_instance_get_migration_status(source_node,
8394 msg = result.fail_msg
8395 ms = result.payload # MigrationStatus instance
8396 if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
8397 logging.error("Instance migration failed, trying to revert"
8398 " disk status: %s", msg)
8399 self.feedback_fn("Migration failed, aborting")
8400 self._AbortMigration()
8401 self._RevertDiskStatus()
8402 raise errors.OpExecError("Could not migrate instance %s: %s" %
8403 (instance.name, msg))
8405 if result.payload.status != constants.HV_MIGRATION_ACTIVE:
8406 self.feedback_fn("* memory transfer complete")
8409 if (utils.TimeoutExpired(last_feedback,
8410 self._MIGRATION_FEEDBACK_INTERVAL) and
8411 ms.transferred_ram is not None):
8412 mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
8413 self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
8414 last_feedback = time.time()
8416 time.sleep(self._MIGRATION_POLL_INTERVAL)
8418 result = self.rpc.call_instance_finalize_migration_src(source_node,
8422 msg = result.fail_msg
8424 logging.error("Instance migration succeeded, but finalization failed"
8425 " on the source node: %s", msg)
8426 raise errors.OpExecError("Could not finalize instance migration: %s" %
8429 instance.primary_node = target_node
8431 # distribute new instance config to the other nodes
8432 self.cfg.Update(instance, self.feedback_fn)
8434 result = self.rpc.call_instance_finalize_migration_dst(target_node,
8438 msg = result.fail_msg
8440 logging.error("Instance migration succeeded, but finalization failed"
8441 " on the target node: %s", msg)
8442 raise errors.OpExecError("Could not finalize instance migration: %s" %
8445 if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
8446 self._EnsureSecondary(source_node)
8447 self._WaitUntilSync()
8448 self._GoStandalone()
8449 self._GoReconnect(False)
8450 self._WaitUntilSync()
8452 # If the instance's disk template is `rbd' and there was a successful
8453 # migration, unmap the device from the source node.
8454 if self.instance.disk_template == constants.DT_RBD:
8455 disks = _ExpandCheckDisks(instance, instance.disks)
8456 self.feedback_fn("* unmapping instance's disks from %s" % source_node)
8458 result = self.rpc.call_blockdev_shutdown(source_node, disk)
8459 msg = result.fail_msg
8461 logging.error("Migration was successful, but couldn't unmap the"
8462 " block device %s on source node %s: %s",
8463 disk.iv_name, source_node, msg)
8464 logging.error("You need to unmap the device %s manually on %s",
8465 disk.iv_name, source_node)
8467 self.feedback_fn("* done")
8469 def _ExecFailover(self):
8470 """Failover an instance.
8472 The failover is done by shutting it down on its present node and
8473 starting it on the secondary.
8476 instance = self.instance
8477 primary_node = self.cfg.GetNodeInfo(instance.primary_node)
8479 source_node = instance.primary_node
8480 target_node = self.target_node
8482 if instance.admin_state == constants.ADMINST_UP:
8483 self.feedback_fn("* checking disk consistency between source and target")
8484 for (idx, dev) in enumerate(instance.disks):
8485 # for drbd, these are drbd over lvm
8486 if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
8488 if primary_node.offline:
8489 self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
8491 (primary_node.name, idx, target_node))
8492 elif not self.ignore_consistency:
8493 raise errors.OpExecError("Disk %s is degraded on target node,"
8494 " aborting failover" % idx)
8496 self.feedback_fn("* not checking disk consistency as instance is not"
8499 self.feedback_fn("* shutting down instance on source node")
8500 logging.info("Shutting down instance %s on node %s",
8501 instance.name, source_node)
8503 result = self.rpc.call_instance_shutdown(source_node, instance,
8504 self.shutdown_timeout)
8505 msg = result.fail_msg
8507 if self.ignore_consistency or primary_node.offline:
8508 self.lu.LogWarning("Could not shutdown instance %s on node %s,"
8509 " proceeding anyway; please make sure node"
8510 " %s is down; error details: %s",
8511 instance.name, source_node, source_node, msg)
8513 raise errors.OpExecError("Could not shutdown instance %s on"
8515 (instance.name, source_node, msg))
8517 self.feedback_fn("* deactivating the instance's disks on source node")
8518 if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
8519 raise errors.OpExecError("Can't shut down the instance's disks")
8521 instance.primary_node = target_node
8522 # distribute new instance config to the other nodes
8523 self.cfg.Update(instance, self.feedback_fn)
8525 # Only start the instance if it's marked as up
8526 if instance.admin_state == constants.ADMINST_UP:
8527 self.feedback_fn("* activating the instance's disks on target node %s" %
8529 logging.info("Starting instance %s on node %s",
8530 instance.name, target_node)
8532 disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
8533 ignore_secondaries=True)
8535 _ShutdownInstanceDisks(self.lu, instance)
8536 raise errors.OpExecError("Can't activate the instance's disks")
8538 self.feedback_fn("* starting the instance on the target node %s" %
8540 result = self.rpc.call_instance_start(target_node, (instance, None, None),
8542 msg = result.fail_msg
8544 _ShutdownInstanceDisks(self.lu, instance)
8545 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
8546 (instance.name, target_node, msg))
8548 def Exec(self, feedback_fn):
8549 """Perform the migration.
8552 self.feedback_fn = feedback_fn
8553 self.source_node = self.instance.primary_node
8555 # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
8556 if self.instance.disk_template in constants.DTS_INT_MIRROR:
8557 self.target_node = self.instance.secondary_nodes[0]
8558 # Otherwise self.target_node has been populated either
8559 # directly, or through an iallocator.
8561 self.all_nodes = [self.source_node, self.target_node]
8562 self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
8563 in self.cfg.GetMultiNodeInfo(self.all_nodes))
8566 feedback_fn("Failover instance %s" % self.instance.name)
8567 self._ExecFailover()
8569 feedback_fn("Migrating instance %s" % self.instance.name)
8572 return self._ExecCleanup()
8574 return self._ExecMigration()
8577 def _CreateBlockDev(lu, node, instance, device, force_create,
8579 """Create a tree of block devices on a given node.
8581 If this device type has to be created on secondaries, create it and
8584 If not, just recurse to children keeping the same 'force' value.
8586 @param lu: the lu on whose behalf we execute
8587 @param node: the node on which to create the device
8588 @type instance: L{objects.Instance}
8589 @param instance: the instance which owns the device
8590 @type device: L{objects.Disk}
8591 @param device: the device to create
8592 @type force_create: boolean
8593 @param force_create: whether to force creation of this device; this
8594 will be changed to True whenever we find a device which has the
8595 CreateOnSecondary() attribute
8596 @param info: the extra 'metadata' we should attach to the device
8597 (this will be represented as an LVM tag)
8598 @type force_open: boolean
8599 @param force_open: this parameter will be passed to the
8600 L{backend.BlockdevCreate} function where it specifies
8601 whether we run on primary or not, and it affects both
8602 the child assembly and the device's own Open() execution
8605 if device.CreateOnSecondary():
8609 for child in device.children:
8610 _CreateBlockDev(lu, node, instance, child, force_create,
8613 if not force_create:
8616 _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
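# Illustrative summary of the recursion above: a device whose
# CreateOnSecondary() returns True flips force_create to True for itself and
# its children, so it gets created even on nodes where the caller passed
# False; otherwise the caller's value is kept and, if it is False, the
# function returns without creating anything on this node.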
8619 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
8620 """Create a single block device on a given node.
8622 This will not recurse over children of the device, so they must be
8625 @param lu: the lu on whose behalf we execute
8626 @param node: the node on which to create the device
8627 @type instance: L{objects.Instance}
8628 @param instance: the instance which owns the device
8629 @type device: L{objects.Disk}
8630 @param device: the device to create
8631 @param info: the extra 'metadata' we should attach to the device
8632 (this will be represented as an LVM tag)
8633 @type force_open: boolean
8634 @param force_open: this parameter will be passed to the
8635 L{backend.BlockdevCreate} function where it specifies
8636 whether we run on primary or not, and it affects both
8637 the child assembly and the device's own Open() execution
8640 lu.cfg.SetDiskID(device, node)
8641 result = lu.rpc.call_blockdev_create(node, device, device.size,
8642 instance.name, force_open, info)
8643 result.Raise("Can't create block device %s on"
8644 " node %s for instance %s" % (device, node, instance.name))
8645 if device.physical_id is None:
8646 device.physical_id = result.payload
8649 def _GenerateUniqueNames(lu, exts):
8650 """Generate a suitable LV name.
8652 This will generate a logical volume name for the given instance.
8657 new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
8658 results.append("%s%s" % (new_id, val))
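# Illustrative example (hypothetical ID value): assuming GenerateUniqueID
# returns a UUID-style string such as "d2a1...", calling this helper with
# exts=[".disk0_data", ".disk0_meta"] would yield names like
# "d2a1....disk0_data" and "d2a1....disk0_meta", one per extension.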
8662 def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
8663 iv_name, p_minor, s_minor):
8664 """Generate a drbd8 device complete with its children.
8667 assert len(vgnames) == len(names) == 2
8668 port = lu.cfg.AllocatePort()
8669 shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
8671 dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
8672 logical_id=(vgnames[0], names[0]),
8674 dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
8675 logical_id=(vgnames[1], names[1]),
8677 drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
8678 logical_id=(primary, secondary, port,
8681 children=[dev_data, dev_meta],
8682 iv_name=iv_name, params={})
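# Sketch of the resulting layout (illustrative): the DRBD8 device built above
# has two LV children, a data volume of the requested size and a
# DRBD_META_SIZE (128MB) metadata volume, and its logical_id ties together
# the primary/secondary nodes, the allocated port, the two minors and the
# shared secret.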
8686 _DISK_TEMPLATE_NAME_PREFIX = {
8687 constants.DT_PLAIN: "",
8688 constants.DT_RBD: ".rbd",
8692 _DISK_TEMPLATE_DEVICE_TYPE = {
8693 constants.DT_PLAIN: constants.LD_LV,
8694 constants.DT_FILE: constants.LD_FILE,
8695 constants.DT_SHARED_FILE: constants.LD_FILE,
8696 constants.DT_BLOCK: constants.LD_BLOCKDEV,
8697 constants.DT_RBD: constants.LD_RBD,
8701 def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
8702 secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
8703 feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
8704 _req_shr_file_storage=opcodes.RequireSharedFileStorage):
8705 """Generate the entire disk layout for a given template type.
8708 #TODO: compute space requirements
8710 vgname = lu.cfg.GetVGName()
8711 disk_count = len(disk_info)
8714 if template_name == constants.DT_DISKLESS:
8716 elif template_name == constants.DT_DRBD8:
8717 if len(secondary_nodes) != 1:
8718 raise errors.ProgrammerError("Wrong template configuration")
8719 remote_node = secondary_nodes[0]
8720 minors = lu.cfg.AllocateDRBDMinor(
8721 [primary_node, remote_node] * len(disk_info), instance_name)
8723 (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
8725 drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
8728 for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
8729 for i in range(disk_count)]):
8730 names.append(lv_prefix + "_data")
8731 names.append(lv_prefix + "_meta")
8732 for idx, disk in enumerate(disk_info):
8733 disk_index = idx + base_index
8734 data_vg = disk.get(constants.IDISK_VG, vgname)
8735 meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
8736 disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
8737 disk[constants.IDISK_SIZE],
8739 names[idx * 2:idx * 2 + 2],
8740 "disk/%d" % disk_index,
8741 minors[idx * 2], minors[idx * 2 + 1])
8742 disk_dev.mode = disk[constants.IDISK_MODE]
8743 disks.append(disk_dev)
8746 raise errors.ProgrammerError("Wrong template configuration")
8748 if template_name == constants.DT_FILE:
8750 elif template_name == constants.DT_SHARED_FILE:
8751 _req_shr_file_storage()
8753 name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
8754 if name_prefix is None:
8757 names = _GenerateUniqueNames(lu, ["%s.disk%s" %
8758 (name_prefix, base_index + i)
8759 for i in range(disk_count)])
8761 if template_name == constants.DT_PLAIN:
8762 def logical_id_fn(idx, _, disk):
8763 vg = disk.get(constants.IDISK_VG, vgname)
8764 return (vg, names[idx])
8765 elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
8767 lambda _, disk_index, disk: (file_driver,
8768 "%s/disk%d" % (file_storage_dir,
8770 elif template_name == constants.DT_BLOCK:
8772 lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
8773 disk[constants.IDISK_ADOPT])
8774 elif template_name == constants.DT_RBD:
8775 logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
8777 raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
8779 dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
8781 for idx, disk in enumerate(disk_info):
8782 disk_index = idx + base_index
8783 size = disk[constants.IDISK_SIZE]
8784 feedback_fn("* disk %s, size %s" %
8785 (disk_index, utils.FormatUnit(size, "h")))
8786 disks.append(objects.Disk(dev_type=dev_type, size=size,
8787 logical_id=logical_id_fn(idx, disk_index, disk),
8788 iv_name="disk/%d" % disk_index,
8789 mode=disk[constants.IDISK_MODE],
8795 def _GetInstanceInfoText(instance):
8796 """Compute the text that should be added to the disk's metadata.
8799 return "originstname+%s" % instance.name
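# Illustrative example (hypothetical instance name): for an instance called
# "web1" this returns "originstname+web1", which later ends up as an LVM tag
# on the instance's volumes.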
8802 def _CalcEta(time_taken, written, total_size):
8803 """Calculates the ETA based on size written and total size.
8805 @param time_taken: The time taken so far
8806 @param written: amount written so far
8807 @param total_size: The total size of data to be written
8808 @return: The remaining time in seconds
8811 avg_time = time_taken / float(written)
8812 return (total_size - written) * avg_time
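# Worked example (hypothetical numbers): _CalcEta(30.0, 256, 1024) gives an
# average of 30.0 / 256 seconds per unit written, so the estimated remaining
# time is (1024 - 256) * 30.0 / 256 = 90.0 seconds.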
8815 def _WipeDisks(lu, instance):
8816 """Wipes instance disks.
8818 @type lu: L{LogicalUnit}
8819 @param lu: the logical unit on whose behalf we execute
8820 @type instance: L{objects.Instance}
8821 @param instance: the instance whose disks we should create
8822 @return: the success of the wipe
8825 node = instance.primary_node
8827 for device in instance.disks:
8828 lu.cfg.SetDiskID(device, node)
8830 logging.info("Pause sync of instance %s disks", instance.name)
8831 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8832 (instance.disks, instance),
8835 for idx, success in enumerate(result.payload):
8837 logging.warn("pause-sync of instance %s for disk %d failed",
8841 for idx, device in enumerate(instance.disks):
8842 # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk, but
8843 # at most MAX_WIPE_CHUNK
8844 wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
8845 constants.MIN_WIPE_CHUNK_PERCENT)
8846 # we _must_ make this an int, otherwise rounding errors will
8848 wipe_chunk_size = int(wipe_chunk_size)
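# Worked example (hypothetical constant values of MAX_WIPE_CHUNK = 1024 and
# MIN_WIPE_CHUNK_PERCENT = 10): a 20480MB disk gets
# min(1024, 20480 / 100.0 * 10) = 1024MB chunks, while a 5000MB disk gets
# 500MB chunks.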
8850 lu.LogInfo("* Wiping disk %d", idx)
8851 logging.info("Wiping disk %d for instance %s, node %s using"
8852 " chunk size %s", idx, instance.name, node, wipe_chunk_size)
8857 start_time = time.time()
8859 while offset < size:
8860 wipe_size = min(wipe_chunk_size, size - offset)
8861 logging.debug("Wiping disk %d, offset %s, chunk %s",
8862 idx, offset, wipe_size)
8863 result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
8865 result.Raise("Could not wipe disk %d at offset %d for size %d" %
8866 (idx, offset, wipe_size))
8869 if now - last_output >= 60:
8870 eta = _CalcEta(now - start_time, offset, size)
8871 lu.LogInfo(" - done: %.1f%% ETA: %s" %
8872 (offset / float(size) * 100, utils.FormatSeconds(eta)))
8875 logging.info("Resume sync of instance %s disks", instance.name)
8877 result = lu.rpc.call_blockdev_pause_resume_sync(node,
8878 (instance.disks, instance),
8881 for idx, success in enumerate(result.payload):
8883 lu.LogWarning("Resume sync of disk %d failed, please have a"
8884 " look at the status and troubleshoot the issue", idx)
8885 logging.warn("resume-sync of instance %s for disk %d failed",
8889 def _CreateDisks(lu, instance, to_skip=None, target_node=None):
8890 """Create all disks for an instance.
8892 This abstracts away some work from AddInstance.
8894 @type lu: L{LogicalUnit}
8895 @param lu: the logical unit on whose behalf we execute
8896 @type instance: L{objects.Instance}
8897 @param instance: the instance whose disks we should create
8899 @param to_skip: list of indices to skip
8900 @type target_node: string
8901 @param target_node: if passed, overrides the target node for creation
8903 @return: the success of the creation
8906 info = _GetInstanceInfoText(instance)
8907 if target_node is None:
8908 pnode = instance.primary_node
8909 all_nodes = instance.all_nodes
8914 if instance.disk_template in constants.DTS_FILEBASED:
8915 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
8916 result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
8918 result.Raise("Failed to create directory '%s' on"
8919 " node %s" % (file_storage_dir, pnode))
8921 # Note: this needs to be kept in sync with adding of disks in
8922 # LUInstanceSetParams
8923 for idx, device in enumerate(instance.disks):
8924 if to_skip and idx in to_skip:
8926 logging.info("Creating disk %s for instance '%s'", idx, instance.name)
8928 for node in all_nodes:
8929 f_create = node == pnode
8930 _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
8933 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
8934 """Remove all disks for an instance.
8936 This abstracts away some work from `AddInstance()` and
8937 `RemoveInstance()`. Note that in case some of the devices couldn't
8938 be removed, the removal will continue with the other ones (compare
8939 with `_CreateDisks()`).
8941 @type lu: L{LogicalUnit}
8942 @param lu: the logical unit on whose behalf we execute
8943 @type instance: L{objects.Instance}
8944 @param instance: the instance whose disks we should remove
8945 @type target_node: string
8946 @param target_node: used to override the node on which to remove the disks
8948 @return: the success of the removal
8951 logging.info("Removing block devices for instance %s", instance.name)
8954 ports_to_release = set()
8955 for (idx, device) in enumerate(instance.disks):
8957 edata = [(target_node, device)]
8959 edata = device.ComputeNodeTree(instance.primary_node)
8960 for node, disk in edata:
8961 lu.cfg.SetDiskID(disk, node)
8962 msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
8964 lu.LogWarning("Could not remove disk %s on node %s,"
8965 " continuing anyway: %s", idx, node, msg)
8968 # if this is a DRBD disk, return its port to the pool
8969 if device.dev_type in constants.LDS_DRBD:
8970 ports_to_release.add(device.logical_id[2])
8972 if all_result or ignore_failures:
8973 for port in ports_to_release:
8974 lu.cfg.AddTcpUdpPort(port)
8976 if instance.disk_template == constants.DT_FILE:
8977 file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
8981 tgt = instance.primary_node
8982 result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
8984 lu.LogWarning("Could not remove directory '%s' on node %s: %s",
8985 file_storage_dir, instance.primary_node, result.fail_msg)
8991 def _ComputeDiskSizePerVG(disk_template, disks):
8992 """Compute disk size requirements in the volume group
8995 def _compute(disks, payload):
8996 """Universal algorithm.
9001 vgs[disk[constants.IDISK_VG]] = \
9002 vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + payload
9006 # Required free disk space as a function of disk and swap space
9008 constants.DT_DISKLESS: {},
9009 constants.DT_PLAIN: _compute(disks, 0),
9010 # 128 MB are added for drbd metadata for each disk
9011 constants.DT_DRBD8: _compute(disks, DRBD_META_SIZE),
9012 constants.DT_FILE: {},
9013 constants.DT_SHARED_FILE: {},
9016 if disk_template not in req_size_dict:
9017 raise errors.ProgrammerError("Disk template '%s' size requirement"
9018 " is unknown" % disk_template)
9020 return req_size_dict[disk_template]
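# Illustrative example (hypothetical VG name "xenvg"): for a DRBD8 template
# with two disks of 1024MB and 2048MB in that VG, the helper above returns
# {"xenvg": 1024 + 2048 + 2 * DRBD_META_SIZE}, i.e. the per-VG sum of disk
# sizes plus 128MB of DRBD metadata per disk.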
9023 def _ComputeDiskSize(disk_template, disks):
9024 """Compute disk size requirements in the volume group
9027 # Required free disk space as a function of disk and swap space
9029 constants.DT_DISKLESS: None,
9030 constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
9031 # 128 MB are added for drbd metadata for each disk
9033 sum(d[constants.IDISK_SIZE] + DRBD_META_SIZE for d in disks),
9034 constants.DT_FILE: None,
9035 constants.DT_SHARED_FILE: 0,
9036 constants.DT_BLOCK: 0,
9037 constants.DT_RBD: 0,
9040 if disk_template not in req_size_dict:
9041 raise errors.ProgrammerError("Disk template '%s' size requirement"
9042 " is unknown" % disk_template)
9044 return req_size_dict[disk_template]
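# Illustrative example: for two DRBD8 disks of 1024MB and 2048MB this returns
# 1024 + 2048 + 2 * 128 = 3328MB, while file-based and diskless templates
# need no volume group space (None) and shared-file/block/rbd report 0.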
9047 def _FilterVmNodes(lu, nodenames):
9048 """Filters out non-vm_capable nodes from a list.
9050 @type lu: L{LogicalUnit}
9051 @param lu: the logical unit for which we check
9052 @type nodenames: list
9053 @param nodenames: the list of nodes on which we should check
9055 @return: the list of vm-capable nodes
9058 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
9059 return [name for name in nodenames if name not in vm_nodes]
9062 def _CheckHVParams(lu, nodenames, hvname, hvparams):
9063 """Hypervisor parameter validation.
9065 This function abstracts the hypervisor parameter validation to be
9066 used in both instance create and instance modify.
9068 @type lu: L{LogicalUnit}
9069 @param lu: the logical unit for which we check
9070 @type nodenames: list
9071 @param nodenames: the list of nodes on which we should check
9072 @type hvname: string
9073 @param hvname: the name of the hypervisor we should use
9074 @type hvparams: dict
9075 @param hvparams: the parameters which we need to check
9076 @raise errors.OpPrereqError: if the parameters are not valid
9079 nodenames = _FilterVmNodes(lu, nodenames)
9081 cluster = lu.cfg.GetClusterInfo()
9082 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
9084 hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
9085 for node in nodenames:
9089 info.Raise("Hypervisor parameter validation failed on node %s" % node)
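# Hypothetical usage sketch: an instance-creation LU might call
# _CheckHVParams(self, [pnode, snode], self.op.hypervisor, self.op.hvparams);
# the helper fills in the cluster-level defaults itself before asking every
# vm-capable node to validate the resulting parameters via RPC.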
9092 def _CheckOSParams(lu, required, nodenames, osname, osparams):
9093 """OS parameters validation.
9095 @type lu: L{LogicalUnit}
9096 @param lu: the logical unit for which we check
9097 @type required: boolean
9098 @param required: whether the validation should fail if the OS is not
9100 @type nodenames: list
9101 @param nodenames: the list of nodes on which we should check
9102 @type osname: string
9103 @param osname: the name of the OS we should use
9104 @type osparams: dict
9105 @param osparams: the parameters which we need to check
9106 @raise errors.OpPrereqError: if the parameters are not valid
9109 nodenames = _FilterVmNodes(lu, nodenames)
9110 result = lu.rpc.call_os_validate(nodenames, required, osname,
9111 [constants.OS_VALIDATE_PARAMETERS],
9113 for node, nres in result.items():
9114 # we don't check for offline cases since this should be run only
9115 # against the master node and/or an instance's nodes
9116 nres.Raise("OS Parameters validation failed on node %s" % node)
9117 if not nres.payload:
9118 lu.LogInfo("OS %s not found on node %s, validation skipped",
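# Hypothetical usage sketch: instance creation might call
# _CheckOSParams(self, True, [pnode], self.op.os_type, merged_osparams),
# where merged_osparams is a placeholder for the filled-in OS parameter dict;
# each checked node validates the parameters, and nodes that report no OS
# payload just log that validation was skipped.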
9122 class LUInstanceCreate(LogicalUnit):
9123 """Create an instance.
9126 HPATH = "instance-add"
9127 HTYPE = constants.HTYPE_INSTANCE
9130 def CheckArguments(self):
9134 # do not require name_check to ease forward/backward compatibility
9136 if self.op.no_install and self.op.start:
9137 self.LogInfo("No-installation mode selected, disabling startup")
9138 self.op.start = False
9139 # validate/normalize the instance name
9140 self.op.instance_name = \
9141 netutils.Hostname.GetNormalizedName(self.op.instance_name)
9143 if self.op.ip_check and not self.op.name_check:
9144 # TODO: make the ip check more flexible and not depend on the name check
9145 raise errors.OpPrereqError("Cannot do IP address check without a name"
9146 " check", errors.ECODE_INVAL)
9148 # check nics' parameter names
9149 for nic in self.op.nics:
9150 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
9152 # check disks. parameter names and consistent adopt/no-adopt strategy
9153 has_adopt = has_no_adopt = False
9154 for disk in self.op.disks:
9155 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
9156 if constants.IDISK_ADOPT in disk:
9160 if has_adopt and has_no_adopt:
9161 raise errors.OpPrereqError("Either all disks are adopted or none is",
9164 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
9165 raise errors.OpPrereqError("Disk adoption is not supported for the"
9166 " '%s' disk template" %
9167 self.op.disk_template,
9169 if self.op.iallocator is not None:
9170 raise errors.OpPrereqError("Disk adoption not allowed with an"
9171 " iallocator script", errors.ECODE_INVAL)
9172 if self.op.mode == constants.INSTANCE_IMPORT:
9173 raise errors.OpPrereqError("Disk adoption not allowed for"
9174 " instance import", errors.ECODE_INVAL)
9176 if self.op.disk_template in constants.DTS_MUST_ADOPT:
9177 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
9178 " but no 'adopt' parameter given" %
9179 self.op.disk_template,
9182 self.adopt_disks = has_adopt
9184 # instance name verification
9185 if self.op.name_check:
9186 self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
9187 self.op.instance_name = self.hostname1.name
9188 # used in CheckPrereq for ip ping check
9189 self.check_ip = self.hostname1.ip
9191 self.check_ip = None
9193 # file storage checks
9194 if (self.op.file_driver and
9195 not self.op.file_driver in constants.FILE_DRIVER):
9196 raise errors.OpPrereqError("Invalid file driver name '%s'" %
9197 self.op.file_driver, errors.ECODE_INVAL)
9199 if self.op.disk_template == constants.DT_FILE:
9200 opcodes.RequireFileStorage()
9201 elif self.op.disk_template == constants.DT_SHARED_FILE:
9202 opcodes.RequireSharedFileStorage()
9204 ### Node/iallocator related checks
9205 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
9207 if self.op.pnode is not None:
9208 if self.op.disk_template in constants.DTS_INT_MIRROR:
9209 if self.op.snode is None:
9210 raise errors.OpPrereqError("The networked disk templates need"
9211 " a mirror node", errors.ECODE_INVAL)
9213 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
9215 self.op.snode = None
9217 self._cds = _GetClusterDomainSecret()
9219 if self.op.mode == constants.INSTANCE_IMPORT:
9220 # On import force_variant must be True, because if we forced it at
9221 # initial install, our only chance when importing it back is that it
9223 self.op.force_variant = True
9225 if self.op.no_install:
9226 self.LogInfo("No-installation mode has no effect during import")
9228 elif self.op.mode == constants.INSTANCE_CREATE:
9229 if self.op.os_type is None:
9230 raise errors.OpPrereqError("No guest OS specified",
9232 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
9233 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
9234 " installation" % self.op.os_type,
9236 if self.op.disk_template is None:
9237 raise errors.OpPrereqError("No disk template specified",
9240 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9241 # Check handshake to ensure both clusters have the same domain secret
9242 src_handshake = self.op.source_handshake
9243 if not src_handshake:
9244 raise errors.OpPrereqError("Missing source handshake",
9247 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
9250 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
9253 # Load and check source CA
9254 self.source_x509_ca_pem = self.op.source_x509_ca
9255 if not self.source_x509_ca_pem:
9256 raise errors.OpPrereqError("Missing source X509 CA",
9260 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
9262 except OpenSSL.crypto.Error, err:
9263 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
9264 (err, ), errors.ECODE_INVAL)
9266 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9267 if errcode is not None:
9268 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
9271 self.source_x509_ca = cert
9273 src_instance_name = self.op.source_instance_name
9274 if not src_instance_name:
9275 raise errors.OpPrereqError("Missing source instance name",
9278 self.source_instance_name = \
9279 netutils.GetHostname(name=src_instance_name).name
9282 raise errors.OpPrereqError("Invalid instance creation mode %r" %
9283 self.op.mode, errors.ECODE_INVAL)
9285 def ExpandNames(self):
9286 """ExpandNames for CreateInstance.
9288 Figure out the right locks for instance creation.
9291 self.needed_locks = {}
9293 instance_name = self.op.instance_name
9294 # this is just a preventive check, but someone might still add this
9295 # instance in the meantime, and creation will fail at lock-add time
9296 if instance_name in self.cfg.GetInstanceList():
9297 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
9298 instance_name, errors.ECODE_EXISTS)
9300 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
9302 if self.op.iallocator:
9303 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
9304 # specifying a group on instance creation and then selecting nodes from
9306 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9307 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
9309 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
9310 nodelist = [self.op.pnode]
9311 if self.op.snode is not None:
9312 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
9313 nodelist.append(self.op.snode)
9314 self.needed_locks[locking.LEVEL_NODE] = nodelist
9315 # Lock resources of instance's primary and secondary nodes (copy to
9316       # prevent accidental modification)
9317 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodelist)
9319 # in case of import lock the source node too
9320 if self.op.mode == constants.INSTANCE_IMPORT:
9321 src_node = self.op.src_node
9322 src_path = self.op.src_path
9324 if src_path is None:
9325 self.op.src_path = src_path = self.op.instance_name
9327 if src_node is None:
9328 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9329 self.op.src_node = None
9330 if os.path.isabs(src_path):
9331 raise errors.OpPrereqError("Importing an instance from a path"
9332                                      " requires a source node option",
9333                                      errors.ECODE_INVAL)
9334       else:
9335         self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
9336 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
9337 self.needed_locks[locking.LEVEL_NODE].append(src_node)
9338 if not os.path.isabs(src_path):
9339 self.op.src_path = src_path = \
9340 utils.PathJoin(constants.EXPORT_DIR, src_path)
9342 def _RunAllocator(self):
9343     """Run the allocator based on input opcode.
9345     """
9346 nics = [n.ToDict() for n in self.nics]
9347 ial = IAllocator(self.cfg, self.rpc,
9348 mode=constants.IALLOCATOR_MODE_ALLOC,
9349 name=self.op.instance_name,
9350 disk_template=self.op.disk_template,
9353 vcpus=self.be_full[constants.BE_VCPUS],
9354 memory=self.be_full[constants.BE_MAXMEM],
9355 spindle_use=self.be_full[constants.BE_SPINDLE_USE],
9356                      disks=self.disks,
9357                      nics=nics,
9358                      hypervisor=self.op.hypervisor,
9359                      )
9361     ial.Run(self.op.iallocator)
9363     if not ial.success:
9364 raise errors.OpPrereqError("Can't compute nodes using"
9365 " iallocator '%s': %s" %
9366 (self.op.iallocator, ial.info),
9368 if len(ial.result) != ial.required_nodes:
9369 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9370 " of nodes (%s), required %s" %
9371 (self.op.iallocator, len(ial.result),
9372 ial.required_nodes), errors.ECODE_FAULT)
9373 self.op.pnode = ial.result[0]
9374 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
9375 self.op.instance_name, self.op.iallocator,
9376 utils.CommaJoin(ial.result))
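    # Mirrored disk templates need a secondary node as well, in which case
    # the allocator returns two nodes.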
9377 if ial.required_nodes == 2:
9378 self.op.snode = ial.result[1]
9380 def BuildHooksEnv(self):
9381     """Build hooks env.
9383     This runs on master, primary and secondary nodes of the instance.
9385     """
9386     env = {
9387       "ADD_MODE": self.op.mode,
9388       }
9389 if self.op.mode == constants.INSTANCE_IMPORT:
9390 env["SRC_NODE"] = self.op.src_node
9391 env["SRC_PATH"] = self.op.src_path
9392 env["SRC_IMAGES"] = self.src_images
9394 env.update(_BuildInstanceHookEnv(
9395 name=self.op.instance_name,
9396 primary_node=self.op.pnode,
9397 secondary_nodes=self.secondaries,
9398 status=self.op.start,
9399 os_type=self.op.os_type,
9400 minmem=self.be_full[constants.BE_MINMEM],
9401 maxmem=self.be_full[constants.BE_MAXMEM],
9402 vcpus=self.be_full[constants.BE_VCPUS],
9403 nics=_NICListToTuple(self, self.nics),
9404 disk_template=self.op.disk_template,
9405 disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
9406 for d in self.disks],
9409 hypervisor_name=self.op.hypervisor,
9415 def BuildHooksNodes(self):
9416 """Build hooks nodes.
9418     """
9419     nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
9421     return nl, nl
9422 def _ReadExportInfo(self):
9423 """Reads the export information from disk.
9425 It will override the opcode source node and path with the actual
9426 information, if these two were not specified before.
9428     @return: the export information
9430     """
9431 assert self.op.mode == constants.INSTANCE_IMPORT
9433 src_node = self.op.src_node
9434 src_path = self.op.src_path
9436 if src_node is None:
9437 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
9438 exp_list = self.rpc.call_export_list(locked_nodes)
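      # Scan the export lists of all locked nodes until the requested
      # relative path is found.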
9439       found = False
9440       for node in exp_list:
9441         if exp_list[node].fail_msg:
9442           continue
9443         if src_path in exp_list[node].payload:
9444           found = True
9445           self.op.src_node = src_node = node
9446           self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
9447                                                        src_path)
9448           break
9449       if not found:
9450         raise errors.OpPrereqError("No export found for relative path %s" %
9451 src_path, errors.ECODE_INVAL)
9453 _CheckNodeOnline(self, src_node)
9454 result = self.rpc.call_export_info(src_node, src_path)
9455 result.Raise("No export or invalid export found in dir %s" % src_path)
9457 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
9458 if not export_info.has_section(constants.INISECT_EXP):
9459 raise errors.ProgrammerError("Corrupted export config",
9460 errors.ECODE_ENVIRON)
9462 ei_version = export_info.get(constants.INISECT_EXP, "version")
9463 if (int(ei_version) != constants.EXPORT_VERSION):
9464 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
9465 (ei_version, constants.EXPORT_VERSION),
9466 errors.ECODE_ENVIRON)
9469 def _ReadExportParams(self, einfo):
9470 """Use export parameters as defaults.
9472 In case the opcode doesn't specify (as in override) some instance
9473     parameters, then try to use them from the export information, if
9474     the export declares them.
9476     """
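    # The export data is an INI-style file; the per-instance section carries
    # keys such as "disk0_size", "nic0_mac", "hypervisor" and "tags", which
    # are the options looked up below (listed here for illustration only).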
9477 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
9479 if self.op.disk_template is None:
9480 if einfo.has_option(constants.INISECT_INS, "disk_template"):
9481         self.op.disk_template = einfo.get(constants.INISECT_INS,
9482                                           "disk_template")
9483         if self.op.disk_template not in constants.DISK_TEMPLATES:
9484           raise errors.OpPrereqError("Disk template specified in configuration"
9485                                      " file is not one of the allowed values:"
9486                                      " %s" % " ".join(constants.DISK_TEMPLATES))
9487       else:
9488         raise errors.OpPrereqError("No disk template specified and the export"
9489                                    " is missing the disk_template information",
9490                                    errors.ECODE_INVAL)
9492     if not self.op.disks:
9493       disks = []
9494 # TODO: import the disk iv_name too
9495 for idx in range(constants.MAX_DISKS):
9496 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
9497 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
9498 disks.append({constants.IDISK_SIZE: disk_sz})
9499 self.op.disks = disks
9500 if not disks and self.op.disk_template != constants.DT_DISKLESS:
9501 raise errors.OpPrereqError("No disk info specified and the export"
9502 " is missing the disk information",
9505     if not self.op.nics:
9506       nics = []
9507       for idx in range(constants.MAX_NICS):
9508         if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
9509           ndict = {}
9510           for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
9511             v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
9512             ndict[name] = v
9513           nics.append(ndict)
9514         else:
9515           break
9516       self.op.nics = nics
9518 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
9519 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
9521 if (self.op.hypervisor is None and
9522 einfo.has_option(constants.INISECT_INS, "hypervisor")):
9523 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
9525 if einfo.has_section(constants.INISECT_HYP):
9526 # use the export parameters but do not override the ones
9527 # specified by the user
9528 for name, value in einfo.items(constants.INISECT_HYP):
9529 if name not in self.op.hvparams:
9530 self.op.hvparams[name] = value
9532 if einfo.has_section(constants.INISECT_BEP):
9533 # use the parameters, without overriding
9534 for name, value in einfo.items(constants.INISECT_BEP):
9535 if name not in self.op.beparams:
9536 self.op.beparams[name] = value
9537 # Compatibility for the old "memory" be param
9538 if name == constants.BE_MEMORY:
9539 if constants.BE_MAXMEM not in self.op.beparams:
9540 self.op.beparams[constants.BE_MAXMEM] = value
9541 if constants.BE_MINMEM not in self.op.beparams:
9542 self.op.beparams[constants.BE_MINMEM] = value
9544 # try to read the parameters old style, from the main section
9545 for name in constants.BES_PARAMETERS:
9546 if (name not in self.op.beparams and
9547 einfo.has_option(constants.INISECT_INS, name)):
9548 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
9550 if einfo.has_section(constants.INISECT_OSP):
9551 # use the parameters, without overriding
9552 for name, value in einfo.items(constants.INISECT_OSP):
9553 if name not in self.op.osparams:
9554 self.op.osparams[name] = value
9556 def _RevertToDefaults(self, cluster):
9557     """Revert the instance parameters to the default values.
9559     """
9561 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
9562 for name in self.op.hvparams.keys():
9563 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
9564 del self.op.hvparams[name]
9566 be_defs = cluster.SimpleFillBE({})
9567 for name in self.op.beparams.keys():
9568 if name in be_defs and be_defs[name] == self.op.beparams[name]:
9569 del self.op.beparams[name]
9571 nic_defs = cluster.SimpleFillNIC({})
9572 for nic in self.op.nics:
9573 for name in constants.NICS_PARAMETERS:
9574         if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
9575           del nic[name]
9577 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
9578 for name in self.op.osparams.keys():
9579 if name in os_defs and os_defs[name] == self.op.osparams[name]:
9580 del self.op.osparams[name]
9582 def _CalculateFileStorageDir(self):
9583 """Calculate final instance file storage dir.
9586 # file storage dir calculation/check
9587 self.instance_file_storage_dir = None
9588 if self.op.disk_template in constants.DTS_FILEBASED:
9589       # build the full file storage dir path
9590       joinargs = []
9592 if self.op.disk_template == constants.DT_SHARED_FILE:
9593 get_fsd_fn = self.cfg.GetSharedFileStorageDir
9594       else:
9595         get_fsd_fn = self.cfg.GetFileStorageDir
9597 cfg_storagedir = get_fsd_fn()
9598 if not cfg_storagedir:
9599 raise errors.OpPrereqError("Cluster file storage dir not defined")
9600 joinargs.append(cfg_storagedir)
9602 if self.op.file_storage_dir is not None:
9603 joinargs.append(self.op.file_storage_dir)
9605 joinargs.append(self.op.instance_name)
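      # The resulting path is
      # <cluster file storage dir>[/<user-supplied subdir>]/<instance name>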
9607 # pylint: disable=W0142
9608 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
9610 def CheckPrereq(self): # pylint: disable=R0914
9611     """Check prerequisites.
9613     """
9614 self._CalculateFileStorageDir()
9616 if self.op.mode == constants.INSTANCE_IMPORT:
9617 export_info = self._ReadExportInfo()
9618 self._ReadExportParams(export_info)
9620 if (not self.cfg.GetVGName() and
9621 self.op.disk_template not in constants.DTS_NOT_LVM):
9622 raise errors.OpPrereqError("Cluster does not support lvm-based"
9623 " instances", errors.ECODE_STATE)
9625 if (self.op.hypervisor is None or
9626 self.op.hypervisor == constants.VALUE_AUTO):
9627 self.op.hypervisor = self.cfg.GetHypervisorType()
9629 cluster = self.cfg.GetClusterInfo()
9630 enabled_hvs = cluster.enabled_hypervisors
9631 if self.op.hypervisor not in enabled_hvs:
9632 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
9633 " cluster (%s)" % (self.op.hypervisor,
9634 ",".join(enabled_hvs)),
9637 # Check tag validity
9638 for tag in self.op.tags:
9639 objects.TaggableObject.ValidateTag(tag)
9641 # check hypervisor parameter syntax (locally)
9642 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
9643     filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
9644                                       self.op.hvparams)
9645 hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
9646 hv_type.CheckParameterSyntax(filled_hvp)
9647 self.hv_full = filled_hvp
9648 # check that we don't specify global parameters on an instance
9649 _CheckGlobalHvParams(self.op.hvparams)
9651 # fill and remember the beparams dict
9652 default_beparams = cluster.beparams[constants.PP_DEFAULT]
9653 for param, value in self.op.beparams.iteritems():
9654 if value == constants.VALUE_AUTO:
9655 self.op.beparams[param] = default_beparams[param]
9656 objects.UpgradeBeParams(self.op.beparams)
9657 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
9658 self.be_full = cluster.SimpleFillBE(self.op.beparams)
9660 # build os parameters
9661 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
9663     # now that hvp/bep are in final format, let's reset to defaults,
9664     # if told to do so
9665     if self.op.identify_defaults:
9666       self._RevertToDefaults(cluster)
9668     # NIC buildup
9669     self.nics = []
9670 for idx, nic in enumerate(self.op.nics):
9671 nic_mode_req = nic.get(constants.INIC_MODE, None)
9672 nic_mode = nic_mode_req
9673 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
9674 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
9676 # in routed mode, for the first nic, the default ip is 'auto'
9677 if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
9678 default_ip_mode = constants.VALUE_AUTO
9679       else:
9680         default_ip_mode = constants.VALUE_NONE
9682 # ip validity checks
9683 ip = nic.get(constants.INIC_IP, default_ip_mode)
9684       if ip is None or ip.lower() == constants.VALUE_NONE:
9685         nic_ip = None
9686       elif ip.lower() == constants.VALUE_AUTO:
9687         if not self.op.name_check:
9688           raise errors.OpPrereqError("IP address set to auto but name checks"
9689                                      " have been skipped",
9690                                      errors.ECODE_INVAL)
9691         nic_ip = self.hostname1.ip
9692       else:
9693         if not netutils.IPAddress.IsValid(ip):
9694           raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
9695                                      errors.ECODE_INVAL)
9696         nic_ip = ip
9698 # TODO: check the ip address for uniqueness
9699 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
9700 raise errors.OpPrereqError("Routed nic mode requires an ip address",
9703 # MAC address verification
9704 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
9705 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9706 mac = utils.NormalizeAndValidateMac(mac)
9708         try:
9709           self.cfg.ReserveMAC(mac, self.proc.GetECId())
9710 except errors.ReservationError:
9711 raise errors.OpPrereqError("MAC address %s already in use"
9712 " in cluster" % mac,
9713 errors.ECODE_NOTUNIQUE)
9715 # Build nic parameters
9716 link = nic.get(constants.INIC_LINK, None)
9717 if link == constants.VALUE_AUTO:
9718 link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
9721 nicparams[constants.NIC_MODE] = nic_mode
9723 nicparams[constants.NIC_LINK] = link
9725 check_params = cluster.SimpleFillNIC(nicparams)
9726 objects.NIC.CheckParameterSyntax(check_params)
9727 self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
9729 # disk checks/pre-build
9730     default_vg = self.cfg.GetVGName()
9731     self.disks = []
9732 for disk in self.op.disks:
9733 mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
9734 if mode not in constants.DISK_ACCESS_SET:
9735 raise errors.OpPrereqError("Invalid disk access mode '%s'" %
9736 mode, errors.ECODE_INVAL)
9737       size = disk.get(constants.IDISK_SIZE, None)
9738       if size is None:
9739         raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
9740       try:
9741         size = int(size)
9742       except (TypeError, ValueError):
9743         raise errors.OpPrereqError("Invalid disk size '%s'" % size,
9744                                    errors.ECODE_INVAL)
9746       data_vg = disk.get(constants.IDISK_VG, default_vg)
9747       new_disk = {
9748         constants.IDISK_SIZE: size,
9749         constants.IDISK_MODE: mode,
9750         constants.IDISK_VG: data_vg,
9751         }
9752 if constants.IDISK_METAVG in disk:
9753 new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
9754 if constants.IDISK_ADOPT in disk:
9755 new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
9756 self.disks.append(new_disk)
9758     if self.op.mode == constants.INSTANCE_IMPORT:
9759       disk_images = []
9760 for idx in range(len(self.disks)):
9761 option = "disk%d_dump" % idx
9762 if export_info.has_option(constants.INISECT_INS, option):
9763 # FIXME: are the old os-es, disk sizes, etc. useful?
9764 export_name = export_info.get(constants.INISECT_INS, option)
9765 image = utils.PathJoin(self.op.src_path, export_name)
9766 disk_images.append(image)
9767         else:
9768           disk_images.append(False)
9770 self.src_images = disk_images
9772 old_name = export_info.get(constants.INISECT_INS, "name")
9773 if self.op.instance_name == old_name:
9774 for idx, nic in enumerate(self.nics):
9775 if nic.mac == constants.VALUE_AUTO:
9776 nic_mac_ini = "nic%d_mac" % idx
9777 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
9779 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
9781 # ip ping checks (we use the same ip that was resolved in ExpandNames)
9782 if self.op.ip_check:
9783 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
9784 raise errors.OpPrereqError("IP %s of instance %s already in use" %
9785 (self.check_ip, self.op.instance_name),
9786 errors.ECODE_NOTUNIQUE)
9788 #### mac address generation
9789 # By generating here the mac address both the allocator and the hooks get
9790 # the real final mac address rather than the 'auto' or 'generate' value.
9791 # There is a race condition between the generation and the instance object
9792 # creation, which means that we know the mac is valid now, but we're not
9793 # sure it will be when we actually add the instance. If things go bad
9794 # adding the instance will abort because of a duplicate mac, and the
9795 # creation job will fail.
9796 for nic in self.nics:
9797 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9798 nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
9802 if self.op.iallocator is not None:
9803 self._RunAllocator()
9805 # Release all unneeded node locks
9806 _ReleaseLocks(self, locking.LEVEL_NODE,
9807 keep=filter(None, [self.op.pnode, self.op.snode,
9809 _ReleaseLocks(self, locking.LEVEL_NODE_RES,
9810 keep=filter(None, [self.op.pnode, self.op.snode,
9813 #### node related checks
9815 # check primary node
9816 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
9817 assert self.pnode is not None, \
9818 "Cannot retrieve locked node %s" % self.op.pnode
9819     if pnode.offline:
9820       raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
9821 pnode.name, errors.ECODE_STATE)
9822     if pnode.drained:
9823       raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
9824 pnode.name, errors.ECODE_STATE)
9825 if not pnode.vm_capable:
9826 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
9827 " '%s'" % pnode.name, errors.ECODE_STATE)
9829 self.secondaries = []
9831 # mirror node verification
9832 if self.op.disk_template in constants.DTS_INT_MIRROR:
9833 if self.op.snode == pnode.name:
9834 raise errors.OpPrereqError("The secondary node cannot be the"
9835 " primary node", errors.ECODE_INVAL)
9836 _CheckNodeOnline(self, self.op.snode)
9837 _CheckNodeNotDrained(self, self.op.snode)
9838 _CheckNodeVmCapable(self, self.op.snode)
9839 self.secondaries.append(self.op.snode)
9841 snode = self.cfg.GetNodeInfo(self.op.snode)
9842 if pnode.group != snode.group:
9843 self.LogWarning("The primary and secondary nodes are in two"
9844 " different node groups; the disk parameters"
9845 " from the first disk's node group will be"
9848 nodenames = [pnode.name] + self.secondaries
9850 # Verify instance specs
9851 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
9852     ispec = {
9853       constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
9854 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
9855 constants.ISPEC_DISK_COUNT: len(self.disks),
9856 constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
9857 constants.ISPEC_NIC_COUNT: len(self.nics),
9858       constants.ISPEC_SPINDLE_USE: spindle_use,
9859       }
9861 group_info = self.cfg.GetNodeGroup(pnode.group)
9862 ipolicy = _CalculateGroupIPolicy(cluster, group_info)
9863 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
9864 if not self.op.ignore_ipolicy and res:
9865 raise errors.OpPrereqError(("Instance allocation to group %s violates"
9866 " policy: %s") % (pnode.group,
9867 utils.CommaJoin(res)),
9870 if not self.adopt_disks:
9871 if self.op.disk_template == constants.DT_RBD:
9872 # _CheckRADOSFreeSpace() is just a placeholder.
9873 # Any function that checks prerequisites can be placed here.
9874 # Check if there is enough space on the RADOS cluster.
9875 _CheckRADOSFreeSpace()
9877 # Check lv size requirements, if not adopting
9878 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
9879 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
9881 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
9882 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
9883 disk[constants.IDISK_ADOPT])
9884 for disk in self.disks])
9885 if len(all_lvs) != len(self.disks):
9886 raise errors.OpPrereqError("Duplicate volume names given for adoption",
9888 for lv_name in all_lvs:
9890 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
9891 # to ReserveLV uses the same syntax
9892 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
9893 except errors.ReservationError:
9894 raise errors.OpPrereqError("LV named %s used by another instance" %
9895 lv_name, errors.ECODE_NOTUNIQUE)
9897 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
9898 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
9900 node_lvs = self.rpc.call_lv_list([pnode.name],
9901 vg_names.payload.keys())[pnode.name]
9902 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
9903 node_lvs = node_lvs.payload
9905 delta = all_lvs.difference(node_lvs.keys())
9907 raise errors.OpPrereqError("Missing logical volume(s): %s" %
9908 utils.CommaJoin(delta),
9910 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
9912 raise errors.OpPrereqError("Online logical volumes found, cannot"
9913 " adopt: %s" % utils.CommaJoin(online_lvs),
9915 # update the size of disk based on what is found
9916 for dsk in self.disks:
9917 dsk[constants.IDISK_SIZE] = \
9918 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
9919 dsk[constants.IDISK_ADOPT])][0]))
9921 elif self.op.disk_template == constants.DT_BLOCK:
9922 # Normalize and de-duplicate device paths
9923 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
9924 for disk in self.disks])
9925 if len(all_disks) != len(self.disks):
9926 raise errors.OpPrereqError("Duplicate disk names given for adoption",
9928 baddisks = [d for d in all_disks
9929 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
9931 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
9932 " cannot be adopted" %
9933 (", ".join(baddisks),
9934 constants.ADOPTABLE_BLOCKDEV_ROOT),
9937 node_disks = self.rpc.call_bdev_sizes([pnode.name],
9938 list(all_disks))[pnode.name]
9939 node_disks.Raise("Cannot get block device information from node %s" %
9941 node_disks = node_disks.payload
9942 delta = all_disks.difference(node_disks.keys())
9944 raise errors.OpPrereqError("Missing block device(s): %s" %
9945 utils.CommaJoin(delta),
9947 for dsk in self.disks:
9948 dsk[constants.IDISK_SIZE] = \
9949 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
9951 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
9953 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
9954 # check OS parameters (remotely)
9955 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
9957 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
9959 # memory check on primary node
9960     #TODO(dynmem): use MINMEM for checking
9961     if self.op.start:
9962       _CheckNodeFreeMemory(self, self.pnode.name,
9963                            "creating instance %s" % self.op.instance_name,
9964                            self.be_full[constants.BE_MAXMEM],
9965                            self.op.hypervisor)
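    # In dry-run mode only the list of nodes that would host the instance
    # is returned.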
9967 self.dry_run_result = list(nodenames)
9969 def Exec(self, feedback_fn):
9970     """Create and add the instance to the cluster.
9972     """
9973 instance = self.op.instance_name
9974 pnode_name = self.pnode.name
9976 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
9977 self.owned_locks(locking.LEVEL_NODE)), \
9978 "Node locks differ from node resource locks"
9980 ht_kind = self.op.hypervisor
9981 if ht_kind in constants.HTS_REQ_PORT:
9982       network_port = self.cfg.AllocatePort()
9983     else:
9984       network_port = None
9986 # This is ugly but we got a chicken-egg problem here
9987 # We can only take the group disk parameters, as the instance
9988 # has no disks yet (we are generating them right here).
9989 node = self.cfg.GetNodeInfo(pnode_name)
9990 nodegroup = self.cfg.GetNodeGroup(node.group)
9991 disks = _GenerateDiskTemplate(self,
9992 self.op.disk_template,
9993 instance, pnode_name,
9996 self.instance_file_storage_dir,
9997 self.op.file_driver,
10000 self.cfg.GetGroupDiskParams(nodegroup))
10002 iobj = objects.Instance(name=instance, os=self.op.os_type,
10003 primary_node=pnode_name,
10004 nics=self.nics, disks=disks,
10005 disk_template=self.op.disk_template,
10006 admin_state=constants.ADMINST_DOWN,
10007 network_port=network_port,
10008 beparams=self.op.beparams,
10009 hvparams=self.op.hvparams,
10010 hypervisor=self.op.hypervisor,
10011                            osparams=self.op.osparams,
10012                            )
10015    for tag in self.op.tags:
10016      iobj.AddTag(tag)
10018 if self.adopt_disks:
10019 if self.op.disk_template == constants.DT_PLAIN:
10020 # rename LVs to the newly-generated names; we need to construct
10021 # 'fake' LV disks with the old data, plus the new unique_id
10022 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
10023        rename_to = []
10024        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
10025 rename_to.append(t_dsk.logical_id)
10026 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
10027 self.cfg.SetDiskID(t_dsk, pnode_name)
10028 result = self.rpc.call_blockdev_rename(pnode_name,
10029 zip(tmp_disks, rename_to))
10030        result.Raise("Failed to rename adopted LVs")
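        # From this point on the adopted LVs carry the Ganeti-generated
        # names recorded in the instance's disk objects.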
10032    feedback_fn("* creating instance disks...")
10033    try:
10034      _CreateDisks(self, iobj)
10035    except errors.OpExecError:
10036      self.LogWarning("Device creation failed, reverting...")
10037      try:
10038        _RemoveDisks(self, iobj)
10039      finally:
10040        self.cfg.ReleaseDRBDMinors(instance)
10041        raise
10043 feedback_fn("adding instance %s to cluster config" % instance)
10045 self.cfg.AddInstance(iobj, self.proc.GetECId())
10047 # Declare that we don't want to remove the instance lock anymore, as we've
10048 # added the instance to the config
10049 del self.remove_locks[locking.LEVEL_INSTANCE]
10051 if self.op.mode == constants.INSTANCE_IMPORT:
10052 # Release unused nodes
10053 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
10055 # Release all nodes
10056 _ReleaseLocks(self, locking.LEVEL_NODE)
10058    disk_abort = False
10059    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
10060      feedback_fn("* wiping instance disks...")
10061      try:
10062        _WipeDisks(self, iobj)
10063      except errors.OpExecError, err:
10064        logging.exception("Wiping disks failed")
10065        self.LogWarning("Wiping instance disks failed (%s)", err)
10066        disk_abort = True
10068    if disk_abort:
10069      # Something is already wrong with the disks, don't do anything else
10070      pass
10071    elif self.op.wait_for_sync:
10072 disk_abort = not _WaitForSync(self, iobj)
10073 elif iobj.disk_template in constants.DTS_INT_MIRROR:
10074 # make sure the disks are not degraded (still sync-ing is ok)
10075 feedback_fn("* checking mirrors status")
10076 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
10077    else:
10078      disk_abort = False
10080    if disk_abort:
10081      _RemoveDisks(self, iobj)
10082 self.cfg.RemoveInstance(iobj.name)
10083 # Make sure the instance lock gets removed
10084 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
10085      raise errors.OpExecError("There are some degraded disks for"
10086                               " this instance")
10088 # Release all node resource locks
10089 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
10091 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
10092 if self.op.mode == constants.INSTANCE_CREATE:
10093 if not self.op.no_install:
10094 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
10095 not self.op.wait_for_sync)
10097 feedback_fn("* pausing disk sync to install instance OS")
10098 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10101 for idx, success in enumerate(result.payload):
10103 logging.warn("pause-sync of instance %s for disk %d failed",
10106 feedback_fn("* running the instance OS create scripts...")
10107 # FIXME: pass debug option from opcode to backend
10109 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
10110 self.op.debug_level)
10112 feedback_fn("* resuming disk sync")
10113 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
10116 for idx, success in enumerate(result.payload):
10118 logging.warn("resume-sync of instance %s for disk %d failed",
10121 os_add_result.Raise("Could not add os for instance %s"
10122 " on node %s" % (instance, pnode_name))
10124 elif self.op.mode == constants.INSTANCE_IMPORT:
10125 feedback_fn("* running the instance OS import scripts...")
10129 for idx, image in enumerate(self.src_images):
10133 # FIXME: pass debug option from opcode to backend
10134 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
10135 constants.IEIO_FILE, (image, ),
10136 constants.IEIO_SCRIPT,
10137 (iobj.disks[idx], idx),
10139 transfers.append(dt)
10142 masterd.instance.TransferInstanceData(self, feedback_fn,
10143 self.op.src_node, pnode_name,
10144 self.pnode.secondary_ip,
10146 if not compat.all(import_result):
10147 self.LogWarning("Some disks for instance %s on node %s were not"
10148 " imported successfully" % (instance, pnode_name))
10150 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
10151 feedback_fn("* preparing remote import...")
10152 # The source cluster will stop the instance before attempting to make a
10153 # connection. In some cases stopping an instance can take a long time,
10154 # hence the shutdown timeout is added to the connection timeout.
10155 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
10156 self.op.source_shutdown_timeout)
10157 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10159 assert iobj.primary_node == self.pnode.name
10161 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
10162 self.source_x509_ca,
10163 self._cds, timeouts)
10164 if not compat.all(disk_results):
10165 # TODO: Should the instance still be started, even if some disks
10166 # failed to import (valid for local imports, too)?
10167 self.LogWarning("Some disks for instance %s on node %s were not"
10168 " imported successfully" % (instance, pnode_name))
10170 # Run rename script on newly imported instance
10171 assert iobj.name == instance
10172 feedback_fn("Running rename script for %s" % instance)
10173 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
10174 self.source_instance_name,
10175 self.op.debug_level)
10176 if result.fail_msg:
10177 self.LogWarning("Failed to run rename script for %s on node"
10178 " %s: %s" % (instance, pnode_name, result.fail_msg))
10180      else:
10181        # also checked in the prereq part
10182        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
10183                                     % self.op.mode)
10185 assert not self.owned_locks(locking.LEVEL_NODE_RES)
10187    if self.op.start:
10188      iobj.admin_state = constants.ADMINST_UP
10189 self.cfg.Update(iobj, feedback_fn)
10190 logging.info("Starting instance %s on node %s", instance, pnode_name)
10191 feedback_fn("* starting instance...")
10192      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
10193                                            False)
10194 result.Raise("Could not start instance")
10196 return list(iobj.all_nodes)
10199 def _CheckRADOSFreeSpace():
10200    """Compute disk size requirements inside the RADOS cluster.
10202    """
10203    # For the RADOS cluster we assume there is always enough space.
10204    pass
10207 class LUInstanceConsole(NoHooksLU):
10208 """Connect to an instance's console.
10210 This is somewhat special in that it returns the command line that
10211    you need to run on the master node in order to connect to the
10212    console.
10214    """
10215    REQ_BGL = False
10217 def ExpandNames(self):
10218 self.share_locks = _ShareAll()
10219 self._ExpandAndLockInstance()
10221 def CheckPrereq(self):
10222 """Check prerequisites.
10224 This checks that the instance is in the cluster.
10227 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10228 assert self.instance is not None, \
10229 "Cannot retrieve locked instance %s" % self.op.instance_name
10230 _CheckNodeOnline(self, self.instance.primary_node)
10232 def Exec(self, feedback_fn):
10233 """Connect to the console of an instance
10236 instance = self.instance
10237 node = instance.primary_node
10239 node_insts = self.rpc.call_instance_list([node],
10240 [instance.hypervisor])[node]
10241 node_insts.Raise("Can't get node information from %s" % node)
10243 if instance.name not in node_insts.payload:
10244 if instance.admin_state == constants.ADMINST_UP:
10245 state = constants.INSTST_ERRORDOWN
10246 elif instance.admin_state == constants.ADMINST_DOWN:
10247 state = constants.INSTST_ADMINDOWN
10249 state = constants.INSTST_ADMINOFFLINE
10250 raise errors.OpExecError("Instance %s is not running (state %s)" %
10251 (instance.name, state))
10253 logging.debug("Connecting to console of %s on %s", instance.name, node)
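    # The actual console access information (command, host, port, ...) is
    # built by the hypervisor-specific code via the helper below.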
10255 return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
10258 def _GetInstanceConsole(cluster, instance):
10259 """Returns console information for an instance.
10261 @type cluster: L{objects.Cluster}
10262 @type instance: L{objects.Instance}
10266 hyper = hypervisor.GetHypervisor(instance.hypervisor)
10267 # beparams and hvparams are passed separately, to avoid editing the
10268 # instance and then saving the defaults in the instance itself.
10269 hvparams = cluster.FillHV(instance)
10270 beparams = cluster.FillBE(instance)
10271 console = hyper.GetInstanceConsole(instance, hvparams, beparams)
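  # Sanity-check the console object before handing it to the client.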
10273 assert console.instance == instance.name
10274 assert console.Validate()
10276 return console.ToDict()
10279 class LUInstanceReplaceDisks(LogicalUnit):
10280    """Replace the disks of an instance.
10282    """
10283    HPATH = "mirrors-replace"
10284    HTYPE = constants.HTYPE_INSTANCE
10285    REQ_BGL = False
10287 def CheckArguments(self):
10288 TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
10289 self.op.iallocator)
10291 def ExpandNames(self):
10292 self._ExpandAndLockInstance()
10294 assert locking.LEVEL_NODE not in self.needed_locks
10295 assert locking.LEVEL_NODE_RES not in self.needed_locks
10296 assert locking.LEVEL_NODEGROUP not in self.needed_locks
10298 assert self.op.iallocator is None or self.op.remote_node is None, \
10299 "Conflicting options"
10301 if self.op.remote_node is not None:
10302 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10304 # Warning: do not remove the locking of the new secondary here
10305 # unless DRBD8.AddChildren is changed to work in parallel;
10306 # currently it doesn't since parallel invocations of
10307 # FindUnusedMinor will conflict
10308 self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
10309 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
10310      else:
10311        self.needed_locks[locking.LEVEL_NODE] = []
10312 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10314 if self.op.iallocator is not None:
10315 # iallocator will select a new node in the same group
10316 self.needed_locks[locking.LEVEL_NODEGROUP] = []
10318 self.needed_locks[locking.LEVEL_NODE_RES] = []
10320 self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
10321 self.op.iallocator, self.op.remote_node,
10322 self.op.disks, False, self.op.early_release,
10323 self.op.ignore_ipolicy)
10325 self.tasklets = [self.replacer]
10327 def DeclareLocks(self, level):
10328 if level == locking.LEVEL_NODEGROUP:
10329 assert self.op.remote_node is None
10330 assert self.op.iallocator is not None
10331 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
10333 self.share_locks[locking.LEVEL_NODEGROUP] = 1
10334 # Lock all groups used by instance optimistically; this requires going
10335 # via the node before it's locked, requiring verification later on
10336 self.needed_locks[locking.LEVEL_NODEGROUP] = \
10337 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
10339 elif level == locking.LEVEL_NODE:
10340 if self.op.iallocator is not None:
10341 assert self.op.remote_node is None
10342 assert not self.needed_locks[locking.LEVEL_NODE]
10344 # Lock member nodes of all locked groups
10345 self.needed_locks[locking.LEVEL_NODE] = [node_name
10346 for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
10347 for node_name in self.cfg.GetNodeGroup(group_uuid).members]
10349 self._LockInstancesNodes()
10350 elif level == locking.LEVEL_NODE_RES:
10352 self.needed_locks[locking.LEVEL_NODE_RES] = \
10353 self.needed_locks[locking.LEVEL_NODE]
10355 def BuildHooksEnv(self):
10356 """Build hooks env.
10358 This runs on the master, the primary and all the secondaries.
10361 instance = self.replacer.instance
10363 "MODE": self.op.mode,
10364 "NEW_SECONDARY": self.op.remote_node,
10365 "OLD_SECONDARY": instance.secondary_nodes[0],
10367 env.update(_BuildInstanceHookEnvByObject(self, instance))
10370 def BuildHooksNodes(self):
10371      """Build hooks nodes.
10373      """
10374      instance = self.replacer.instance
10375      nl = [
10376        self.cfg.GetMasterNode(),
10377        instance.primary_node,
10378        ]
10379      if self.op.remote_node is not None:
10380        nl.append(self.op.remote_node)
10382      return nl, nl
10383 def CheckPrereq(self):
10384 """Check prerequisites.
10387 assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
10388 self.op.iallocator is None)
10390 # Verify if node group locks are still correct
10391 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
10393 _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
10395 return LogicalUnit.CheckPrereq(self)
10398 class TLReplaceDisks(Tasklet):
10399 """Replaces disks for an instance.
10401 Note: Locking is not within the scope of this class.
10404 def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
10405 disks, delay_iallocator, early_release, ignore_ipolicy):
10406      """Initializes this class.
10408      """
10409      Tasklet.__init__(self, lu)
10411      # Parameters
10412      self.instance_name = instance_name
10413      self.mode = mode
10414      self.iallocator_name = iallocator_name
10415      self.remote_node = remote_node
10416      self.disks = disks
10417 self.delay_iallocator = delay_iallocator
10418 self.early_release = early_release
10419 self.ignore_ipolicy = ignore_ipolicy
10422 self.instance = None
10423 self.new_node = None
10424 self.target_node = None
10425 self.other_node = None
10426 self.remote_node_info = None
10427 self.node_secondary_ip = None
10430 def CheckArguments(mode, remote_node, iallocator):
10431 """Helper function for users of this class.
10434 # check for valid parameter combination
10435 if mode == constants.REPLACE_DISK_CHG:
10436 if remote_node is None and iallocator is None:
10437 raise errors.OpPrereqError("When changing the secondary either an"
10438 " iallocator script must be used or the"
10439 " new node given", errors.ECODE_INVAL)
10441 if remote_node is not None and iallocator is not None:
10442 raise errors.OpPrereqError("Give either the iallocator or the new"
10443 " secondary, not both", errors.ECODE_INVAL)
10445 elif remote_node is not None or iallocator is not None:
10446 # Not replacing the secondary
10447 raise errors.OpPrereqError("The iallocator and new node options can"
10448 " only be used when changing the"
10449 " secondary node", errors.ECODE_INVAL)
10452 def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
10453 """Compute a new secondary node using an IAllocator.
10456 ial = IAllocator(lu.cfg, lu.rpc,
10457 mode=constants.IALLOCATOR_MODE_RELOC,
10458 name=instance_name,
10459 relocate_from=list(relocate_from))
10461 ial.Run(iallocator_name)
10463 if not ial.success:
10464 raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
10465 " %s" % (iallocator_name, ial.info),
10466 errors.ECODE_NORES)
10468 if len(ial.result) != ial.required_nodes:
10469 raise errors.OpPrereqError("iallocator '%s' returned invalid number"
10470 " of nodes (%s), required %s" %
10472 len(ial.result), ial.required_nodes),
10473 errors.ECODE_FAULT)
10475 remote_node_name = ial.result[0]
10477 lu.LogInfo("Selected new secondary for instance '%s': %s",
10478 instance_name, remote_node_name)
10480 return remote_node_name
10482 def _FindFaultyDisks(self, node_name):
10483 """Wrapper for L{_FindFaultyInstanceDisks}.
10486 return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
10489 def _CheckDisksActivated(self, instance):
10490 """Checks if the instance disks are activated.
10492 @param instance: The instance to check disks
10493 @return: True if they are activated, False otherwise
10496 nodes = instance.all_nodes
10498 for idx, dev in enumerate(instance.disks):
10500 self.lu.LogInfo("Checking disk/%d on %s", idx, node)
10501 self.cfg.SetDiskID(dev, node)
10503 result = self.rpc.call_blockdev_find(node, dev)
10507 elif result.fail_msg or not result.payload:
10512 def CheckPrereq(self):
10513 """Check prerequisites.
10515 This checks that the instance is in the cluster.
10518 self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
10519 assert instance is not None, \
10520 "Cannot retrieve locked instance %s" % self.instance_name
10522 if instance.disk_template != constants.DT_DRBD8:
10523 raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
10524 " instances", errors.ECODE_INVAL)
10526 if len(instance.secondary_nodes) != 1:
10527 raise errors.OpPrereqError("The instance has a strange layout,"
10528 " expected one secondary but found %d" %
10529 len(instance.secondary_nodes),
10530 errors.ECODE_FAULT)
10532 if not self.delay_iallocator:
10533 self._CheckPrereq2()
10535 def _CheckPrereq2(self):
10536 """Check prerequisites, second part.
10538 This function should always be part of CheckPrereq. It was separated and is
10539 now called from Exec because during node evacuation iallocator was only
10540 called with an unmodified cluster model, not taking planned changes into
10544 instance = self.instance
10545 secondary_node = instance.secondary_nodes[0]
10547 if self.iallocator_name is None:
10548 remote_node = self.remote_node
10549      else:
10550        remote_node = self._RunAllocator(self.lu, self.iallocator_name,
10551 instance.name, instance.secondary_nodes)
10553 if remote_node is None:
10554 self.remote_node_info = None
10556 assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
10557 "Remote node '%s' is not locked" % remote_node
10559 self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
10560 assert self.remote_node_info is not None, \
10561 "Cannot retrieve locked node %s" % remote_node
10563 if remote_node == self.instance.primary_node:
10564 raise errors.OpPrereqError("The specified node is the primary node of"
10565 " the instance", errors.ECODE_INVAL)
10567 if remote_node == secondary_node:
10568 raise errors.OpPrereqError("The specified node is already the"
10569 " secondary node of the instance",
10570 errors.ECODE_INVAL)
10572 if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
10573 constants.REPLACE_DISK_CHG):
10574 raise errors.OpPrereqError("Cannot specify disks to be replaced",
10575 errors.ECODE_INVAL)
10577 if self.mode == constants.REPLACE_DISK_AUTO:
10578 if not self._CheckDisksActivated(instance):
10579 raise errors.OpPrereqError("Please run activate-disks on instance %s"
10580 " first" % self.instance_name,
10581 errors.ECODE_STATE)
10582 faulty_primary = self._FindFaultyDisks(instance.primary_node)
10583 faulty_secondary = self._FindFaultyDisks(secondary_node)
10585 if faulty_primary and faulty_secondary:
10586 raise errors.OpPrereqError("Instance %s has faulty disks on more than"
10587 " one node and can not be repaired"
10588 " automatically" % self.instance_name,
10589 errors.ECODE_STATE)
10591      if faulty_primary:
10592        self.disks = faulty_primary
10593 self.target_node = instance.primary_node
10594 self.other_node = secondary_node
10595 check_nodes = [self.target_node, self.other_node]
10596 elif faulty_secondary:
10597 self.disks = faulty_secondary
10598 self.target_node = secondary_node
10599 self.other_node = instance.primary_node
10600 check_nodes = [self.target_node, self.other_node]
10606 # Non-automatic modes
10607 if self.mode == constants.REPLACE_DISK_PRI:
10608 self.target_node = instance.primary_node
10609 self.other_node = secondary_node
10610 check_nodes = [self.target_node, self.other_node]
10612 elif self.mode == constants.REPLACE_DISK_SEC:
10613 self.target_node = secondary_node
10614 self.other_node = instance.primary_node
10615 check_nodes = [self.target_node, self.other_node]
10617 elif self.mode == constants.REPLACE_DISK_CHG:
10618 self.new_node = remote_node
10619 self.other_node = instance.primary_node
10620 self.target_node = secondary_node
10621 check_nodes = [self.new_node, self.other_node]
10623 _CheckNodeNotDrained(self.lu, remote_node)
10624 _CheckNodeVmCapable(self.lu, remote_node)
10626 old_node_info = self.cfg.GetNodeInfo(secondary_node)
10627 assert old_node_info is not None
10628 if old_node_info.offline and not self.early_release:
10629 # doesn't make sense to delay the release
10630 self.early_release = True
10631 self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
10632 " early-release mode", secondary_node)
10635 raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
10638 # If not specified all disks should be replaced
10640 self.disks = range(len(self.instance.disks))
10642 # TODO: This is ugly, but right now we can't distinguish between internal
10643 # submitted opcode and external one. We should fix that.
10644 if self.remote_node_info:
10645 # We change the node, lets verify it still meets instance policy
10646 new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
10647 ipolicy = _CalculateGroupIPolicy(self.cfg.GetClusterInfo(),
10649 _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
10650 ignore=self.ignore_ipolicy)
10652 for node in check_nodes:
10653 _CheckNodeOnline(self.lu, node)
10655 touched_nodes = frozenset(node_name for node_name in [self.new_node,
10658 if node_name is not None)
10660 # Release unneeded node and node resource locks
10661 _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
10662 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
10664 # Release any owned node group
10665 if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
10666 _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
10668 # Check whether disks are valid
10669 for disk_idx in self.disks:
10670 instance.FindDisk(disk_idx)
10672 # Get secondary node IP addresses
10673 self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
10674 in self.cfg.GetMultiNodeInfo(touched_nodes))
10676 def Exec(self, feedback_fn):
10677 """Execute disk replacement.
10679      This dispatches the disk replacement to the appropriate handler.
10681      """
10682      if self.delay_iallocator:
10683        self._CheckPrereq2()
10685      if __debug__:
10686        # Verify owned locks before starting operation
10687 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
10688 assert set(owned_nodes) == set(self.node_secondary_ip), \
10689 ("Incorrect node locks, owning %s, expected %s" %
10690 (owned_nodes, self.node_secondary_ip.keys()))
10691 assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
10692 self.lu.owned_locks(locking.LEVEL_NODE_RES))
10694 owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
10695 assert list(owned_instances) == [self.instance_name], \
10696 "Instance '%s' not locked" % self.instance_name
10698 assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
10699 "Should not own any node group lock at this point"
10702 feedback_fn("No disks need replacement")
10705 feedback_fn("Replacing disk(s) %s for %s" %
10706 (utils.CommaJoin(self.disks), self.instance.name))
10708 activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
10710 # Activate the instance disks if we're replacing them on a down instance
10711      if activate_disks:
10712        _StartInstanceDisks(self.lu, self.instance, True)
10714      try:
10715        # Should we replace the secondary node?
10716 if self.new_node is not None:
10717 fn = self._ExecDrbd8Secondary
10718        else:
10719          fn = self._ExecDrbd8DiskOnly
10721 result = fn(feedback_fn)
10722      finally:
10723        # Deactivate the instance disks if we're replacing them on a
10724        # down instance
10725        if activate_disks:
10726          _SafeShutdownInstanceDisks(self.lu, self.instance)
10728 assert not self.lu.owned_locks(locking.LEVEL_NODE)
10731 # Verify owned locks
10732 owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
10733 nodes = frozenset(self.node_secondary_ip)
10734 assert ((self.early_release and not owned_nodes) or
10735 (not self.early_release and not (set(owned_nodes) - nodes))), \
10736 ("Not owning the correct locks, early_release=%s, owned=%r,"
10737 " nodes=%r" % (self.early_release, owned_nodes, nodes))
10741 def _CheckVolumeGroup(self, nodes):
10742 self.lu.LogInfo("Checking volume groups")
10744 vgname = self.cfg.GetVGName()
10746 # Make sure volume group exists on all involved nodes
10747 results = self.rpc.call_vg_list(nodes)
10749 raise errors.OpExecError("Can't list volume groups on the nodes")
10752 res = results[node]
10753 res.Raise("Error checking node %s" % node)
10754 if vgname not in res.payload:
10755 raise errors.OpExecError("Volume group '%s' not found on node %s" %
10758 def _CheckDisksExistence(self, nodes):
10759 # Check disk existence
10760 for idx, dev in enumerate(self.instance.disks):
10761 if idx not in self.disks:
10765 self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
10766 self.cfg.SetDiskID(dev, node)
10768 result = self.rpc.call_blockdev_find(node, dev)
10770 msg = result.fail_msg
10771 if msg or not result.payload:
10773 msg = "disk not found"
10774 raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
10777 def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
10778 for idx, dev in enumerate(self.instance.disks):
10779 if idx not in self.disks:
10782 self.lu.LogInfo("Checking disk/%d consistency on node %s" %
10785 if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
10786 on_primary, ldisk=ldisk):
10787 raise errors.OpExecError("Node %s has degraded storage, unsafe to"
10788 " replace disks for instance %s" %
10789 (node_name, self.instance.name))
10791 def _CreateNewStorage(self, node_name):
10792 """Create new storage on the primary or secondary node.
10794 This is only used for same-node replaces, not for changing the
10795 secondary node, hence we don't want to modify the existing disk.
10800 for idx, dev in enumerate(self.instance.disks):
10801 if idx not in self.disks:
10804 self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
10806 self.cfg.SetDiskID(dev, node_name)
10808 lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
10809 names = _GenerateUniqueNames(self.lu, lv_names)
10811 vg_data = dev.children[0].logical_id[0]
10812 lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
10813 logical_id=(vg_data, names[0]), params={})
10814 vg_meta = dev.children[1].logical_id[0]
10815 lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
10816 logical_id=(vg_meta, names[1]), params={})
10818 new_lvs = [lv_data, lv_meta]
10819 old_lvs = [child.Copy() for child in dev.children]
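      # Remember the DRBD device together with its old and new local LVs;
      # the later detach/rename/attach steps work off this mapping.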
10820 iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
10822 # we pass force_create=True to force the LVM creation
10823 for new_lv in new_lvs:
10824 _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
10825 _GetInstanceInfoText(self.instance), False)
10829 def _CheckDevices(self, node_name, iv_names):
10830 for name, (dev, _, _) in iv_names.iteritems():
10831 self.cfg.SetDiskID(dev, node_name)
10833 result = self.rpc.call_blockdev_find(node_name, dev)
10835 msg = result.fail_msg
10836 if msg or not result.payload:
10838 msg = "disk not found"
10839 raise errors.OpExecError("Can't find DRBD device %s: %s" %
10842 if result.payload.is_degraded:
10843 raise errors.OpExecError("DRBD device %s is degraded!" % name)
10845 def _RemoveOldStorage(self, node_name, iv_names):
10846 for name, (_, old_lvs, _) in iv_names.iteritems():
10847 self.lu.LogInfo("Remove logical volumes for %s" % name)
10850 self.cfg.SetDiskID(lv, node_name)
10852 msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
10854 self.lu.LogWarning("Can't remove old LV: %s" % msg,
10855 hint="remove unused LVs manually")
10857 def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
10858 """Replace a disk on the primary or secondary for DRBD 8.
10860 The algorithm for replace is quite complicated:
10862 1. for each disk to be replaced:
10864 1. create new LVs on the target node with unique names
10865 1. detach old LVs from the drbd device
10866 1. rename old LVs to name_replaced.<time_t>
10867 1. rename new LVs to old LVs
10868 1. attach the new LVs (with the old names now) to the drbd device
10870 1. wait for sync across all devices
10872 1. for each modified disk:
10874        1. remove old LVs (which have the name name_replaced.<time_t>)
10876      Failures are not very well handled.
10878      """
10879      steps_total = 6
10881 # Step: check device activation
10882 self.lu.LogStep(1, steps_total, "Check device existence")
10883 self._CheckDisksExistence([self.other_node, self.target_node])
10884 self._CheckVolumeGroup([self.target_node, self.other_node])
10886 # Step: check other node consistency
10887 self.lu.LogStep(2, steps_total, "Check peer consistency")
10888 self._CheckDisksConsistency(self.other_node,
10889 self.other_node == self.instance.primary_node,
10892 # Step: create new storage
10893 self.lu.LogStep(3, steps_total, "Allocate new storage")
10894 iv_names = self._CreateNewStorage(self.target_node)
10896 # Step: for each lv, detach+rename*2+attach
10897 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
10898 for dev, old_lvs, new_lvs in iv_names.itervalues():
10899 self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
10901 result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
10903 result.Raise("Can't detach drbd from local storage on node"
10904 " %s for device %s" % (self.target_node, dev.iv_name))
10906 #cfg.Update(instance)
10908 # ok, we created the new LVs, so now we know we have the needed
10909 # storage; as such, we proceed on the target node to rename
10910 # old_lv to _old, and new_lv to old_lv; note that we rename LVs
10911 # using the assumption that logical_id == physical_id (which in
10912 # turn is the unique_id on that node)
10914 # FIXME(iustin): use a better name for the replaced LVs
10915 temp_suffix = int(time.time())
10916 ren_fn = lambda d, suff: (d.physical_id[0],
10917 d.physical_id[1] + "_replaced-%s" % suff)
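      # For example, applying ren_fn to an LV with physical_id
      # ("xenvg", "abc.disk0_data") and suffix 1234567890 would yield
      # ("xenvg", "abc.disk0_data_replaced-1234567890"); the names here are
      # purely illustrative.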
10919 # Build the rename list based on what LVs exist on the node
10920 rename_old_to_new = []
10921 for to_ren in old_lvs:
10922 result = self.rpc.call_blockdev_find(self.target_node, to_ren)
10923 if not result.fail_msg and result.payload:
10925 rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
10927 self.lu.LogInfo("Renaming the old LVs on the target node")
10928 result = self.rpc.call_blockdev_rename(self.target_node,
10930 result.Raise("Can't rename old LVs on node %s" % self.target_node)
10932 # Now we rename the new LVs to the old LVs
10933 self.lu.LogInfo("Renaming the new LVs on the target node")
10934 rename_new_to_old = [(new, old.physical_id)
10935 for old, new in zip(old_lvs, new_lvs)]
10936 result = self.rpc.call_blockdev_rename(self.target_node,
10938 result.Raise("Can't rename new LVs on node %s" % self.target_node)
10940 # Intermediate steps of in memory modifications
10941 for old, new in zip(old_lvs, new_lvs):
10942 new.logical_id = old.logical_id
10943 self.cfg.SetDiskID(new, self.target_node)
10945 # We need to modify old_lvs so that removal later removes the
10946 # right LVs, not the newly added ones; note that old_lvs is a
10948 for disk in old_lvs:
10949 disk.logical_id = ren_fn(disk, temp_suffix)
10950 self.cfg.SetDiskID(disk, self.target_node)
10952 # Now that the new lvs have the old name, we can add them to the device
10953 self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
10954 result = self.rpc.call_blockdev_addchildren(self.target_node,
10955 (dev, self.instance),
10956 (new_lvs, self.instance))
10957 msg = result.fail_msg
10959 for new_lv in new_lvs:
10960 msg2 = self.rpc.call_blockdev_remove(self.target_node,
10963 self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
10964 hint=("cleanup manually the unused logical"
10966 raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
10968 cstep = itertools.count(5)
10970 if self.early_release:
10971 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
10972 self._RemoveOldStorage(self.target_node, iv_names)
10973 # TODO: Check if releasing locks early still makes sense
10974 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
10976 # Release all resource locks except those used by the instance
10977 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
10978 keep=self.node_secondary_ip.keys())
10980 # Release all node locks while waiting for sync
10981 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
10983 # TODO: Can the instance lock be downgraded here? Take the optional disk
10984 # shutdown in the caller into consideration.
10987 # This can fail as the old devices are degraded and _WaitForSync
10988 # does a combined result over all disks, so we don't check its return value
10989 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
10990 _WaitForSync(self.lu, self.instance)
10992 # Check all devices manually
10993 self._CheckDevices(self.instance.primary_node, iv_names)
10995 # Step: remove old storage
10996 if not self.early_release:
10997 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
10998 self._RemoveOldStorage(self.target_node, iv_names)
11000 def _ExecDrbd8Secondary(self, feedback_fn):
11001 """Replace the secondary node for DRBD 8.
11003 The algorithm for replace is quite complicated:
11004 - for all disks of the instance:
11005 - create new LVs on the new node with same names
11006 - shutdown the drbd device on the old secondary
11007 - disconnect the drbd network on the primary
11008 - create the drbd device on the new secondary
11009 - network attach the drbd on the primary, using an artifice:
11010 the drbd code for Attach() will connect to the network if it
11011 finds a device which is connected to the good local disks but
11012 not network enabled
11013 - wait for sync across all devices
11014 - remove all disks from the old secondary
11016 Failures are not very well handled.
11018 """
11019 steps_total = 6
11021 pnode = self.instance.primary_node
11023 # Step: check device activation
11024 self.lu.LogStep(1, steps_total, "Check device existence")
11025 self._CheckDisksExistence([self.instance.primary_node])
11026 self._CheckVolumeGroup([self.instance.primary_node])
11028 # Step: check other node consistency
11029 self.lu.LogStep(2, steps_total, "Check peer consistency")
11030 self._CheckDisksConsistency(self.instance.primary_node, True, True)
11032 # Step: create new storage
11033 self.lu.LogStep(3, steps_total, "Allocate new storage")
11034 for idx, dev in enumerate(self.instance.disks):
11035 self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
11036 (self.new_node, idx))
11037 # we pass force_create=True to force LVM creation
11038 for new_lv in dev.children:
11039 _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
11040 _GetInstanceInfoText(self.instance), False)
11042 # Step 4: DRBD minors and DRBD setup changes
11043 # after this, we must manually remove the drbd minors on both the
11044 # error and the success paths
11045 self.lu.LogStep(4, steps_total, "Changing drbd configuration")
11046 minors = self.cfg.AllocateDRBDMinor([self.new_node
11047 for dev in self.instance.disks],
11048 self.instance.name)
11049 logging.debug("Allocated minors %r", minors)
11052 for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
11053 self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
11054 (self.new_node, idx))
11055 # create new devices on new_node; note that we create two IDs:
11056 # one without port, so the drbd will be activated without
11057 # networking information on the new node at this stage, and one
11058 # with network, for the later activation in step 4
11059 (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
11060 if self.instance.primary_node == o_node1:
11061 p_minor = o_minor1
11062 else:
11063 assert self.instance.primary_node == o_node2, "Three-node instance?"
11064 p_minor = o_minor2
11066 new_alone_id = (self.instance.primary_node, self.new_node, None,
11067 p_minor, new_minor, o_secret)
11068 new_net_id = (self.instance.primary_node, self.new_node, o_port,
11069 p_minor, new_minor, o_secret)
11071 iv_names[idx] = (dev, dev.children, new_net_id)
11072 logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
11074 new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
11075 logical_id=new_alone_id,
11076 children=dev.children,
11080 _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
11081 _GetInstanceInfoText(self.instance), False)
11082 except errors.GenericError:
11083 self.cfg.ReleaseDRBDMinors(self.instance.name)
11086 # We have new devices, shutdown the drbd on the old secondary
11087 for idx, dev in enumerate(self.instance.disks):
11088 self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
11089 self.cfg.SetDiskID(dev, self.target_node)
11090 msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
11091 if msg:
11092 self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
11093 " node: %s" % (idx, msg),
11094 hint=("Please cleanup this device manually as"
11095 " soon as possible"))
11097 self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
11098 result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
11099 self.instance.disks)[pnode]
11101 msg = result.fail_msg
11102 if msg:
11103 # detaches didn't succeed (unlikely)
11104 self.cfg.ReleaseDRBDMinors(self.instance.name)
11105 raise errors.OpExecError("Can't detach the disks from the network on"
11106 " old node: %s" % (msg,))
11108 # if we managed to detach at least one, we update all the disks of
11109 # the instance to point to the new secondary
11110 self.lu.LogInfo("Updating instance configuration")
11111 for dev, _, new_logical_id in iv_names.itervalues():
11112 dev.logical_id = new_logical_id
11113 self.cfg.SetDiskID(dev, self.instance.primary_node)
11115 self.cfg.Update(self.instance, feedback_fn)
11117 # Release all node locks (the configuration has been updated)
11118 _ReleaseLocks(self.lu, locking.LEVEL_NODE)
11120 # and now perform the drbd attach
11121 self.lu.LogInfo("Attaching primary drbds to new secondary"
11122 " (standalone => connected)")
11123 result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
11124 self.new_node],
11125 self.node_secondary_ip,
11126 (self.instance.disks, self.instance),
11127 self.instance.name,
11128 False)
11129 for to_node, to_result in result.items():
11130 msg = to_result.fail_msg
11131 if msg:
11132 self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
11133 to_node, msg,
11134 hint=("please do a gnt-instance info to see the"
11135 " status of disks"))
11137 cstep = itertools.count(5)
11139 if self.early_release:
11140 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11141 self._RemoveOldStorage(self.target_node, iv_names)
11142 # TODO: Check if releasing locks early still makes sense
11143 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
11144 else:
11145 # Release all resource locks except those used by the instance
11146 _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
11147 keep=self.node_secondary_ip.keys())
11149 # TODO: Can the instance lock be downgraded here? Take the optional disk
11150 # shutdown in the caller into consideration.
11153 # This can fail as the old devices are degraded and _WaitForSync
11154 # does a combined result over all disks, so we don't check its return value
11155 self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
11156 _WaitForSync(self.lu, self.instance)
11158 # Check all devices manually
11159 self._CheckDevices(self.instance.primary_node, iv_names)
11161 # Step: remove old storage
11162 if not self.early_release:
11163 self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
11164 self._RemoveOldStorage(self.target_node, iv_names)
11167 class LURepairNodeStorage(NoHooksLU):
11168 """Repairs the volume group on a node.
11173 def CheckArguments(self):
11174 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11176 storage_type = self.op.storage_type
11178 if (constants.SO_FIX_CONSISTENCY not in
11179 constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
11180 raise errors.OpPrereqError("Storage units of type '%s' can not be"
11181 " repaired" % storage_type,
11182 errors.ECODE_INVAL)
11184 def ExpandNames(self):
11185 self.needed_locks = {
11186 locking.LEVEL_NODE: [self.op.node_name],
11187 }
11189 def _CheckFaultyDisks(self, instance, node_name):
11190 """Ensure faulty disks abort the opcode or at least warn."""
11191 try:
11192 if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
11193 node_name, True):
11194 raise errors.OpPrereqError("Instance '%s' has faulty disks on"
11195 " node '%s'" % (instance.name, node_name),
11196 errors.ECODE_STATE)
11197 except errors.OpPrereqError, err:
11198 if self.op.ignore_consistency:
11199 self.proc.LogWarning(str(err.args[0]))
11200 else:
11201 raise
11203 def CheckPrereq(self):
11204 """Check prerequisites.
11207 # Check whether any instance on this node has faulty disks
11208 for inst in _GetNodeInstances(self.cfg, self.op.node_name):
11209 if inst.admin_state != constants.ADMINST_UP:
11210 continue
11211 check_nodes = set(inst.all_nodes)
11212 check_nodes.discard(self.op.node_name)
11213 for inst_node_name in check_nodes:
11214 self._CheckFaultyDisks(inst, inst_node_name)
11216 def Exec(self, feedback_fn):
11217 feedback_fn("Repairing storage unit '%s' on %s ..." %
11218 (self.op.name, self.op.node_name))
11220 st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
11221 result = self.rpc.call_storage_execute(self.op.node_name,
11222 self.op.storage_type, st_args,
11223 self.op.name,
11224 constants.SO_FIX_CONSISTENCY)
11225 result.Raise("Failed to repair storage unit '%s' on %s" %
11226 (self.op.name, self.op.node_name))
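# Illustrative invocation (sketch; exact CLI arguments may vary by version,
# "xenvg" is an assumed volume-group name):
#   gnt-node repair-storage node1.example.com lvm-vg xenvg
# which runs the SO_FIX_CONSISTENCY storage operation above on that node.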
11229 class LUNodeEvacuate(NoHooksLU):
11230 """Evacuates instances off a list of nodes.
11235 _MODE2IALLOCATOR = {
11236 constants.NODE_EVAC_PRI: constants.IALLOCATOR_NEVAC_PRI,
11237 constants.NODE_EVAC_SEC: constants.IALLOCATOR_NEVAC_SEC,
11238 constants.NODE_EVAC_ALL: constants.IALLOCATOR_NEVAC_ALL,
11239 }
11240 assert frozenset(_MODE2IALLOCATOR.keys()) == constants.NODE_EVAC_MODES
11241 assert (frozenset(_MODE2IALLOCATOR.values()) ==
11242 constants.IALLOCATOR_NEVAC_MODES)
11244 def CheckArguments(self):
11245 _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
11247 def ExpandNames(self):
11248 self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
11250 if self.op.remote_node is not None:
11251 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
11252 assert self.op.remote_node
11254 if self.op.remote_node == self.op.node_name:
11255 raise errors.OpPrereqError("Can not use evacuated node as a new"
11256 " secondary node", errors.ECODE_INVAL)
11258 if self.op.mode != constants.NODE_EVAC_SEC:
11259 raise errors.OpPrereqError("Without the use of an iallocator only"
11260 " secondary instances can be evacuated",
11261 errors.ECODE_INVAL)
11264 self.share_locks = _ShareAll()
11265 self.needed_locks = {
11266 locking.LEVEL_INSTANCE: [],
11267 locking.LEVEL_NODEGROUP: [],
11268 locking.LEVEL_NODE: [],
11269 }
11271 # Determine nodes (via group) optimistically, needs verification once locks
11272 # have been acquired
11273 self.lock_nodes = self._DetermineNodes()
11275 def _DetermineNodes(self):
11276 """Gets the list of nodes to operate on.
11279 if self.op.remote_node is None:
11280 # Iallocator will choose any node(s) in the same group
11281 group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
11282 else:
11283 group_nodes = frozenset([self.op.remote_node])
11285 # Determine nodes to be locked
11286 return set([self.op.node_name]) | group_nodes
11288 def _DetermineInstances(self):
11289 """Builds list of instances to operate on.
11292 assert self.op.mode in constants.NODE_EVAC_MODES
11294 if self.op.mode == constants.NODE_EVAC_PRI:
11295 # Primary instances only
11296 inst_fn = _GetNodePrimaryInstances
11297 assert self.op.remote_node is None, \
11298 "Evacuating primary instances requires iallocator"
11299 elif self.op.mode == constants.NODE_EVAC_SEC:
11300 # Secondary instances only
11301 inst_fn = _GetNodeSecondaryInstances
11302 else:
11303 # All instances
11304 assert self.op.mode == constants.NODE_EVAC_ALL
11305 inst_fn = _GetNodeInstances
11306 # TODO: In 2.6, change the iallocator interface to take an evacuation mode
11307 # instead of number of instances
11308 raise errors.OpPrereqError("Due to an issue with the iallocator"
11309 " interface it is not possible to evacuate"
11310 " all instances at once; specify explicitly"
11311 " whether to evacuate primary or secondary"
11312 " instances",
11313 errors.ECODE_INVAL)
11315 return inst_fn(self.cfg, self.op.node_name)
11317 def DeclareLocks(self, level):
11318 if level == locking.LEVEL_INSTANCE:
11319 # Lock instances optimistically, needs verification once node and group
11320 # locks have been acquired
11321 self.needed_locks[locking.LEVEL_INSTANCE] = \
11322 set(i.name for i in self._DetermineInstances())
11324 elif level == locking.LEVEL_NODEGROUP:
11325 # Lock node groups for all potential target nodes optimistically, needs
11326 # verification once nodes have been acquired
11327 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11328 self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
11330 elif level == locking.LEVEL_NODE:
11331 self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
11333 def CheckPrereq(self):
11335 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11336 owned_nodes = self.owned_locks(locking.LEVEL_NODE)
11337 owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
11339 need_nodes = self._DetermineNodes()
11341 if not owned_nodes.issuperset(need_nodes):
11342 raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
11343 " locks were acquired, current nodes are"
11344 " are '%s', used to be '%s'; retry the"
11346 (self.op.node_name,
11347 utils.CommaJoin(need_nodes),
11348 utils.CommaJoin(owned_nodes)),
11349 errors.ECODE_STATE)
11351 wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
11352 if owned_groups != wanted_groups:
11353 raise errors.OpExecError("Node groups changed since locks were acquired,"
11354 " current groups are '%s', used to be '%s';"
11355 " retry the operation" %
11356 (utils.CommaJoin(wanted_groups),
11357 utils.CommaJoin(owned_groups)))
11359 # Determine affected instances
11360 self.instances = self._DetermineInstances()
11361 self.instance_names = [i.name for i in self.instances]
11363 if set(self.instance_names) != owned_instances:
11364 raise errors.OpExecError("Instances on node '%s' changed since locks"
11365 " were acquired, current instances are '%s',"
11366 " used to be '%s'; retry the operation" %
11367 (self.op.node_name,
11368 utils.CommaJoin(self.instance_names),
11369 utils.CommaJoin(owned_instances)))
11371 if self.instance_names:
11372 self.LogInfo("Evacuating instances from node '%s': %s",
11374 utils.CommaJoin(utils.NiceSort(self.instance_names)))
11376 self.LogInfo("No instances to evacuate from node '%s'",
11379 if self.op.remote_node is not None:
11380 for i in self.instances:
11381 if i.primary_node == self.op.remote_node:
11382 raise errors.OpPrereqError("Node %s is the primary node of"
11383 " instance %s, cannot use it as"
11384 " secondary node" %
11385 (self.op.remote_node, i.name),
11386 errors.ECODE_INVAL)
11388 def Exec(self, feedback_fn):
11389 assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
11391 if not self.instance_names:
11392 # No instances to evacuate
11393 jobs = []
11395 elif self.op.iallocator is not None:
11396 # TODO: Implement relocation to other group
11397 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
11398 evac_mode=self._MODE2IALLOCATOR[self.op.mode],
11399 instances=list(self.instance_names))
11401 ial.Run(self.op.iallocator)
11403 if not ial.success:
11404 raise errors.OpPrereqError("Can't compute node evacuation using"
11405 " iallocator '%s': %s" %
11406 (self.op.iallocator, ial.info),
11407 errors.ECODE_NORES)
11409 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
11411 elif self.op.remote_node is not None:
11412 assert self.op.mode == constants.NODE_EVAC_SEC
11413 jobs = [
11414 [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
11415 remote_node=self.op.remote_node,
11416 disks=[],
11417 mode=constants.REPLACE_DISK_CHG,
11418 early_release=self.op.early_release)]
11419 for instance_name in self.instance_names
11420 ]
11422 else:
11423 raise errors.ProgrammerError("No iallocator or remote node")
11425 return ResultWithJobs(jobs)
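# For the remote_node path, the ResultWithJobs above carries one single-opcode
# job per evacuated instance, roughly (sketch with assumed instance names):
#   [[OpInstanceReplaceDisks(instance_name="inst1", remote_node=...,
#                            disks=[], mode=REPLACE_DISK_CHG, ...)],
#    [OpInstanceReplaceDisks(instance_name="inst2", ...)]]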
11428 def _SetOpEarlyRelease(early_release, op):
11429 """Sets C{early_release} flag on opcodes if available.
11433 op.early_release = early_release
11434 except AttributeError:
11435 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
11440 def _NodeEvacDest(use_nodes, group, nodes):
11441 """Returns group or nodes depending on caller's choice.
11445 return utils.CommaJoin(nodes)
11450 def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
11451 """Unpacks the result of change-group and node-evacuate iallocator requests.
11453 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
11454 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
11456 @type lu: L{LogicalUnit}
11457 @param lu: Logical unit instance
11458 @type alloc_result: tuple/list
11459 @param alloc_result: Result from iallocator
11460 @type early_release: bool
11461 @param early_release: Whether to release locks early if possible
11462 @type use_nodes: bool
11463 @param use_nodes: Whether to display node names instead of groups
11466 (moved, failed, jobs) = alloc_result
11468 if failed:
11469 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
11470 for (name, reason) in failed)
11471 lu.LogWarning("Unable to evacuate instances %s", failreason)
11472 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
11474 if moved:
11475 lu.LogInfo("Instances to be moved: %s",
11476 utils.CommaJoin("%s (to %s)" %
11477 (name, _NodeEvacDest(use_nodes, group, nodes))
11478 for (name, group, nodes) in moved))
11480 return [map(compat.partial(_SetOpEarlyRelease, early_release),
11481 map(opcodes.OpCode.LoadOpCode, ops))
11482 for ops in jobs]
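# Shape of the iallocator result consumed here, roughly (sketch):
#   moved  -> [(instance_name, target_group, [target_nodes]), ...]
#   failed -> [(instance_name, failure_reason), ...]
#   jobs   -> [[serialized_opcode, ...], ...]   (one inner list per job)
# Each serialized opcode is revived via opcodes.OpCode.LoadOpCode, and the
# early_release flag is applied where the opcode supports it.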
11485 class LUInstanceGrowDisk(LogicalUnit):
11486 """Grow a disk of an instance.
11489 HPATH = "disk-grow"
11490 HTYPE = constants.HTYPE_INSTANCE
11493 def ExpandNames(self):
11494 self._ExpandAndLockInstance()
11495 self.needed_locks[locking.LEVEL_NODE] = []
11496 self.needed_locks[locking.LEVEL_NODE_RES] = []
11497 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11498 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
11500 def DeclareLocks(self, level):
11501 if level == locking.LEVEL_NODE:
11502 self._LockInstancesNodes()
11503 elif level == locking.LEVEL_NODE_RES:
11505 self.needed_locks[locking.LEVEL_NODE_RES] = \
11506 self.needed_locks[locking.LEVEL_NODE][:]
11508 def BuildHooksEnv(self):
11509 """Build hooks env.
11511 This runs on the master, the primary and all the secondaries.
11513 """
11514 env = {
11515 "DISK": self.op.disk,
11516 "AMOUNT": self.op.amount,
11517 "ABSOLUTE": self.op.absolute,
11518 }
11519 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11520 return env
11522 def BuildHooksNodes(self):
11523 """Build hooks nodes.
11526 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
11527 return (nl, nl)
11529 def CheckPrereq(self):
11530 """Check prerequisites.
11532 This checks that the instance is in the cluster.
11535 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11536 assert instance is not None, \
11537 "Cannot retrieve locked instance %s" % self.op.instance_name
11538 nodenames = list(instance.all_nodes)
11539 for node in nodenames:
11540 _CheckNodeOnline(self, node)
11542 self.instance = instance
11544 if instance.disk_template not in constants.DTS_GROWABLE:
11545 raise errors.OpPrereqError("Instance's disk layout does not support"
11546 " growing", errors.ECODE_INVAL)
11548 self.disk = instance.FindDisk(self.op.disk)
11550 if self.op.absolute:
11551 self.target = self.op.amount
11552 self.delta = self.target - self.disk.size
11553 if self.delta < 0:
11554 raise errors.OpPrereqError("Requested size (%s) is smaller than "
11555 "current disk size (%s)" %
11556 (utils.FormatUnit(self.target, "h"),
11557 utils.FormatUnit(self.disk.size, "h")),
11558 errors.ECODE_STATE)
11559 else:
11560 self.delta = self.op.amount
11561 self.target = self.disk.size + self.delta
11562 if self.delta < 0:
11563 raise errors.OpPrereqError("Requested increment (%s) is negative" %
11564 utils.FormatUnit(self.delta, "h"),
11565 errors.ECODE_INVAL)
11567 if instance.disk_template not in (constants.DT_FILE,
11568 constants.DT_SHARED_FILE,
11570 # TODO: check the free disk space for file, when that feature will be
11572 _CheckNodesFreeDiskPerVG(self, nodenames,
11573 self.disk.ComputeGrowth(self.delta))
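# Worked example of the two growth modes checked above (illustrative numbers):
# with a 10240 MB disk, amount=2048 in relative mode gives delta=2048 and
# target=12288; in absolute mode amount=12288 gives the same delta and target,
# while an absolute amount of 8192 would yield a negative delta and be refused.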
11575 def Exec(self, feedback_fn):
11576 """Execute disk grow.
11579 instance = self.instance
11580 disk = self.disk
11582 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11583 assert (self.owned_locks(locking.LEVEL_NODE) ==
11584 self.owned_locks(locking.LEVEL_NODE_RES))
11586 disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
11587 if not disks_ok:
11588 raise errors.OpExecError("Cannot activate block device to grow")
11590 feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
11591 (self.op.disk, instance.name,
11592 utils.FormatUnit(self.delta, "h"),
11593 utils.FormatUnit(self.target, "h")))
11595 # First run all grow ops in dry-run mode
11596 for node in instance.all_nodes:
11597 self.cfg.SetDiskID(disk, node)
11598 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11600 result.Raise("Grow request failed to node %s" % node)
11602 # We know that (as far as we can test) operations across different
11603 # nodes will succeed, time to run it for real
11604 for node in instance.all_nodes:
11605 self.cfg.SetDiskID(disk, node)
11606 result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
11608 result.Raise("Grow request failed to node %s" % node)
11610 # TODO: Rewrite code to work properly
11611 # DRBD goes into sync mode for a short amount of time after executing the
11612 # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
11613 # calling "resize" in sync mode fails. Sleeping for a short amount of
11614 # time is a work-around.
11615 time.sleep(5)
11617 disk.RecordGrow(self.delta)
11618 self.cfg.Update(instance, feedback_fn)
11620 # Changes have been recorded, release node lock
11621 _ReleaseLocks(self, locking.LEVEL_NODE)
11623 # Downgrade lock while waiting for sync
11624 self.glm.downgrade(locking.LEVEL_INSTANCE)
11626 if self.op.wait_for_sync:
11627 disk_abort = not _WaitForSync(self, instance, disks=[disk])
11629 self.proc.LogWarning("Disk sync-ing has not returned a good"
11630 " status; please check the instance")
11631 if instance.admin_state != constants.ADMINST_UP:
11632 _SafeShutdownInstanceDisks(self, instance, disks=[disk])
11633 elif instance.admin_state != constants.ADMINST_UP:
11634 self.proc.LogWarning("Not shutting down the disk even if the instance is"
11635 " not supposed to be running because no wait for"
11636 " sync mode was requested")
11638 assert self.owned_locks(locking.LEVEL_NODE_RES)
11639 assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
11642 class LUInstanceQueryData(NoHooksLU):
11643 """Query runtime instance data.
11648 def ExpandNames(self):
11649 self.needed_locks = {}
11651 # Use locking if requested or when non-static information is wanted
11652 if not (self.op.static or self.op.use_locking):
11653 self.LogWarning("Non-static data requested, locks need to be acquired")
11654 self.op.use_locking = True
11656 if self.op.instances or not self.op.use_locking:
11657 # Expand instance names right here
11658 self.wanted_names = _GetWantedInstances(self, self.op.instances)
11659 else:
11660 # Will use acquired locks
11661 self.wanted_names = None
11663 if self.op.use_locking:
11664 self.share_locks = _ShareAll()
11666 if self.wanted_names is None:
11667 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
11668 else:
11669 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
11671 self.needed_locks[locking.LEVEL_NODEGROUP] = []
11672 self.needed_locks[locking.LEVEL_NODE] = []
11673 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
11675 def DeclareLocks(self, level):
11676 if self.op.use_locking:
11677 if level == locking.LEVEL_NODEGROUP:
11678 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
11680 # Lock all groups used by instances optimistically; this requires going
11681 # via the node before it's locked, requiring verification later on
11682 self.needed_locks[locking.LEVEL_NODEGROUP] = \
11683 frozenset(group_uuid
11684 for instance_name in owned_instances
11685 for group_uuid in
11686 self.cfg.GetInstanceNodeGroups(instance_name))
11688 elif level == locking.LEVEL_NODE:
11689 self._LockInstancesNodes()
11691 def CheckPrereq(self):
11692 """Check prerequisites.
11694 This only checks the optional instance list against the existing names.
11697 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11698 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11699 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11701 if self.wanted_names is None:
11702 assert self.op.use_locking, "Locking was not used"
11703 self.wanted_names = owned_instances
11705 instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
11707 if self.op.use_locking:
11708 _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
11709 None)
11710 else:
11711 assert not (owned_instances or owned_groups or owned_nodes)
11713 self.wanted_instances = instances.values()
11715 def _ComputeBlockdevStatus(self, node, instance, dev):
11716 """Returns the status of a block device
11719 if self.op.static or not node:
11720 return None
11722 self.cfg.SetDiskID(dev, node)
11724 result = self.rpc.call_blockdev_find(node, dev)
11725 if result.offline:
11726 return None
11728 result.Raise("Can't compute disk status for %s" % instance.name)
11730 status = result.payload
11731 if status is None:
11732 return None
11734 return (status.dev_path, status.major, status.minor,
11735 status.sync_percent, status.estimated_time,
11736 status.is_degraded, status.ldisk_status)
11738 def _ComputeDiskStatus(self, instance, snode, dev):
11739 """Compute block device status.
11742 if dev.dev_type in constants.LDS_DRBD:
11743 # we change the snode then (otherwise we use the one passed in)
11744 if dev.logical_id[0] == instance.primary_node:
11745 snode = dev.logical_id[1]
11747 snode = dev.logical_id[0]
11749 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
11750 instance, dev)
11751 dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
11753 if dev.children:
11754 dev_children = map(compat.partial(self._ComputeDiskStatus,
11755 instance, snode),
11756 dev.children)
11757 else:
11758 dev_children = []
11760 return {
11761 "iv_name": dev.iv_name,
11762 "dev_type": dev.dev_type,
11763 "logical_id": dev.logical_id,
11764 "physical_id": dev.physical_id,
11765 "pstatus": dev_pstatus,
11766 "sstatus": dev_sstatus,
11767 "children": dev_children,
11772 def Exec(self, feedback_fn):
11773 """Gather and return data"""
11774 result = {}
11776 cluster = self.cfg.GetClusterInfo()
11778 node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
11779 nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
11781 groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
11782 for node in nodes.values()))
11784 group2name_fn = lambda uuid: groups[uuid].name
11786 for instance in self.wanted_instances:
11787 pnode = nodes[instance.primary_node]
11789 if self.op.static or pnode.offline:
11790 remote_state = None
11792 self.LogWarning("Primary node %s is marked offline, returning static"
11793 " information only for instance %s" %
11794 (pnode.name, instance.name))
11795 else:
11796 remote_info = self.rpc.call_instance_info(instance.primary_node,
11797 instance.name,
11798 instance.hypervisor)
11799 remote_info.Raise("Error checking node %s" % instance.primary_node)
11800 remote_info = remote_info.payload
11801 if remote_info and "state" in remote_info:
11802 remote_state = "up"
11803 else:
11804 if instance.admin_state == constants.ADMINST_UP:
11805 remote_state = "down"
11806 else:
11807 remote_state = instance.admin_state
11809 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
11810 instance.disks)
11812 snodes_group_uuids = [nodes[snode_name].group
11813 for snode_name in instance.secondary_nodes]
11815 result[instance.name] = {
11816 "name": instance.name,
11817 "config_state": instance.admin_state,
11818 "run_state": remote_state,
11819 "pnode": instance.primary_node,
11820 "pnode_group_uuid": pnode.group,
11821 "pnode_group_name": group2name_fn(pnode.group),
11822 "snodes": instance.secondary_nodes,
11823 "snodes_group_uuids": snodes_group_uuids,
11824 "snodes_group_names": map(group2name_fn, snodes_group_uuids),
11825 "os": instance.os,
11826 # this happens to be the same format used for hooks
11827 "nics": _NICListToTuple(self, instance.nics),
11828 "disk_template": instance.disk_template,
11830 "hypervisor": instance.hypervisor,
11831 "network_port": instance.network_port,
11832 "hv_instance": instance.hvparams,
11833 "hv_actual": cluster.FillHV(instance, skip_globals=True),
11834 "be_instance": instance.beparams,
11835 "be_actual": cluster.FillBE(instance),
11836 "os_instance": instance.osparams,
11837 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
11838 "serial_no": instance.serial_no,
11839 "mtime": instance.mtime,
11840 "ctime": instance.ctime,
11841 "uuid": instance.uuid,
11847 def PrepareContainerMods(mods, private_fn):
11848 """Prepares a list of container modifications by adding a private data field.
11850 @type mods: list of tuples; (operation, index, parameters)
11851 @param mods: List of modifications
11852 @type private_fn: callable or None
11853 @param private_fn: Callable for constructing a private data field for a
11858 if private_fn is None:
11859 fn = lambda: None
11860 else:
11861 fn = private_fn
11863 return [(op, idx, params, fn()) for (op, idx, params) in mods]
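# Illustrative call (sketch): preparing a single NIC addition,
#   PrepareContainerMods([(constants.DDM_ADD, -1, {"mac": "auto"})],
#                        _InstNicModPrivate)
# returns [(constants.DDM_ADD, -1, {"mac": "auto"}, <private object>)], i.e.
# the same tuples with a freshly constructed private data slot appended.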
11866 #: Type description for changes as returned by L{ApplyContainerMods}'s
11867 #: callbacks
11868 _TApplyContModsCbChanges = \
11869 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
11870 ht.TNonEmptyString,
11871 ht.TAny,
11872 ])))
11875 def ApplyContainerMods(kind, container, chgdesc, mods,
11876 create_fn, modify_fn, remove_fn):
11877 """Applies descriptions in C{mods} to C{container}.
11880 @param kind: One-word item description
11881 @type container: list
11882 @param container: Container to modify
11883 @type chgdesc: None or list
11884 @param chgdesc: List of applied changes
11886 @param mods: Modifications as returned by L{PrepareContainerMods}
11887 @type create_fn: callable
11888 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
11889 receives absolute item index, parameters and private data object as added
11890 by L{PrepareContainerMods}, returns tuple containing new item and changes
11892 @type modify_fn: callable
11893 @param modify_fn: Callback for modifying an existing item
11894 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
11895 and private data object as added by L{PrepareContainerMods}, returns
11897 @type remove_fn: callable
11898 @param remove_fn: Callback on removing item; receives absolute item index,
11899 item and private data object as added by L{PrepareContainerMods}
11902 for (op, idx, params, private) in mods:
11905 absidx = len(container) - 1
11907 raise IndexError("Not accepting negative indices other than -1")
11908 elif idx > len(container):
11909 raise IndexError("Got %s index %s, but there are only %s" %
11910 (kind, idx, len(container)))
11916 if op == constants.DDM_ADD:
11917 # Calculate where item will be added
11919 addidx = len(container)
11923 if create_fn is None:
11926 (item, changes) = create_fn(addidx, params, private)
11929 container.append(item)
11932 assert idx <= len(container)
11933 # list.insert does so before the specified index
11934 container.insert(idx, item)
11936 # Retrieve existing item
11938 item = container[absidx]
11940 raise IndexError("Invalid %s index %s" % (kind, idx))
11942 if op == constants.DDM_REMOVE:
11945 if remove_fn is not None:
11946 remove_fn(absidx, item, private)
11948 changes = [("%s/%s" % (kind, absidx), "remove")]
11950 assert container[absidx] == item
11951 del container[absidx]
11952 elif op == constants.DDM_MODIFY:
11953 if modify_fn is not None:
11954 changes = modify_fn(absidx, item, params, private)
11956 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
11958 assert _TApplyContModsCbChanges(changes)
11960 if not (chgdesc is None or changes is None):
11961 chgdesc.extend(changes)
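# Illustrative outcome (sketch): applying a prepared modification such as
# [(constants.DDM_MODIFY, 0, {constants.IDISK_MODE: "ro"}, None)] to a disk
# container calls the modify callback and extends chgdesc with entries like
# [("disk.mode/0", "ro")], matching the (name, value) pairs accepted by
# _TApplyContModsCbChanges above.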
11964 def _UpdateIvNames(base_index, disks):
11965 """Updates the C{iv_name} attribute of disks.
11967 @type disks: list of L{objects.Disk}
11970 for (idx, disk) in enumerate(disks):
11971 disk.iv_name = "disk/%s" % (base_index + idx, )
11974 class _InstNicModPrivate:
11975 """Data structure for network interface modifications.
11977 Used by L{LUInstanceSetParams}.
11980 def __init__(self):
11985 class LUInstanceSetParams(LogicalUnit):
11986 """Modifies an instance's parameters.
11989 HPATH = "instance-modify"
11990 HTYPE = constants.HTYPE_INSTANCE
11994 def _UpgradeDiskNicMods(kind, mods, verify_fn):
11995 assert ht.TList(mods)
11996 assert not mods or len(mods[0]) in (2, 3)
11998 if mods and len(mods[0]) == 2:
12002 for op, params in mods:
12003 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
12004 result.append((op, -1, params))
12008 raise errors.OpPrereqError("Only one %s add or remove operation is"
12009 " supported at a time" % kind,
12010 errors.ECODE_INVAL)
12012 result.append((constants.DDM_MODIFY, op, params))
12014 assert verify_fn(result)
12021 def _CheckMods(kind, mods, key_types, item_fn):
12022 """Ensures requested disk/NIC modifications are valid.
12025 for (op, _, params) in mods:
12026 assert ht.TDict(params)
12028 utils.ForceDictType(params, key_types)
12030 if op == constants.DDM_REMOVE:
12032 raise errors.OpPrereqError("No settings should be passed when"
12033 " removing a %s" % kind,
12034 errors.ECODE_INVAL)
12035 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
12036 item_fn(op, params)
12038 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
12041 def _VerifyDiskModification(op, params):
12042 """Verifies a disk modification.
12045 if op == constants.DDM_ADD:
12046 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
12047 if mode not in constants.DISK_ACCESS_SET:
12048 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
12049 errors.ECODE_INVAL)
12051 size = params.get(constants.IDISK_SIZE, None)
12053 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
12054 constants.IDISK_SIZE, errors.ECODE_INVAL)
12058 except (TypeError, ValueError), err:
12059 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
12060 errors.ECODE_INVAL)
12062 params[constants.IDISK_SIZE] = size
12064 elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
12065 raise errors.OpPrereqError("Disk size change not possible, use"
12066 " grow-disk", errors.ECODE_INVAL)
12069 def _VerifyNicModification(op, params):
12070 """Verifies a network interface modification.
12073 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
12074 ip = params.get(constants.INIC_IP, None)
12077 elif ip.lower() == constants.VALUE_NONE:
12078 params[constants.INIC_IP] = None
12079 elif not netutils.IPAddress.IsValid(ip):
12080 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
12081 errors.ECODE_INVAL)
12083 bridge = params.get("bridge", None)
12084 link = params.get(constants.INIC_LINK, None)
12085 if bridge and link:
12086 raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
12087 " at the same time", errors.ECODE_INVAL)
12088 elif bridge and bridge.lower() == constants.VALUE_NONE:
12089 params["bridge"] = None
12090 elif link and link.lower() == constants.VALUE_NONE:
12091 params[constants.INIC_LINK] = None
12093 if op == constants.DDM_ADD:
12094 macaddr = params.get(constants.INIC_MAC, None)
12095 if macaddr is None:
12096 params[constants.INIC_MAC] = constants.VALUE_AUTO
12098 if constants.INIC_MAC in params:
12099 macaddr = params[constants.INIC_MAC]
12100 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12101 macaddr = utils.NormalizeAndValidateMac(macaddr)
12103 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
12104 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
12105 " modifying an existing NIC",
12106 errors.ECODE_INVAL)
12108 def CheckArguments(self):
12109 if not (self.op.nics or self.op.disks or self.op.disk_template or
12110 self.op.hvparams or self.op.beparams or self.op.os_name or
12111 self.op.offline is not None or self.op.runtime_mem):
12112 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
12114 if self.op.hvparams:
12115 _CheckGlobalHvParams(self.op.hvparams)
12118 self._UpgradeDiskNicMods("disk", self.op.disks,
12119 opcodes.OpInstanceSetParams.TestDiskModifications)
12121 self._UpgradeDiskNicMods("NIC", self.op.nics,
12122 opcodes.OpInstanceSetParams.TestNicModifications)
12124 # Check disk modifications
12125 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
12126 self._VerifyDiskModification)
12128 if self.op.disks and self.op.disk_template is not None:
12129 raise errors.OpPrereqError("Disk template conversion and other disk"
12130 " changes not supported at the same time",
12131 errors.ECODE_INVAL)
12133 if (self.op.disk_template and
12134 self.op.disk_template in constants.DTS_INT_MIRROR and
12135 self.op.remote_node is None):
12136 raise errors.OpPrereqError("Changing the disk template to a mirrored"
12137 " one requires specifying a secondary node",
12138 errors.ECODE_INVAL)
12140 # Check NIC modifications
12141 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
12142 self._VerifyNicModification)
12144 def ExpandNames(self):
12145 self._ExpandAndLockInstance()
12146 # Can't even acquire node locks in shared mode as upcoming changes in
12147 # Ganeti 2.6 will start to modify the node object on disk conversion
12148 self.needed_locks[locking.LEVEL_NODE] = []
12149 self.needed_locks[locking.LEVEL_NODE_RES] = []
12150 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
12152 def DeclareLocks(self, level):
12153 # TODO: Acquire group lock in shared mode (disk parameters)
12154 if level == locking.LEVEL_NODE:
12155 self._LockInstancesNodes()
12156 if self.op.disk_template and self.op.remote_node:
12157 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
12158 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
12159 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
12161 self.needed_locks[locking.LEVEL_NODE_RES] = \
12162 self.needed_locks[locking.LEVEL_NODE][:]
12164 def BuildHooksEnv(self):
12165 """Build hooks env.
12167 This runs on the master, primary and secondaries.
12171 if constants.BE_MINMEM in self.be_new:
12172 args["minmem"] = self.be_new[constants.BE_MINMEM]
12173 if constants.BE_MAXMEM in self.be_new:
12174 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
12175 if constants.BE_VCPUS in self.be_new:
12176 args["vcpus"] = self.be_new[constants.BE_VCPUS]
12177 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
12178 # information at all.
12180 if self._new_nics is not None:
12181 nics = []
12183 for nic in self._new_nics:
12184 nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
12185 mode = nicparams[constants.NIC_MODE]
12186 link = nicparams[constants.NIC_LINK]
12187 nics.append((nic.ip, nic.mac, mode, link))
12189 args["nics"] = nics
12191 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
12192 if self.op.disk_template:
12193 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
12194 if self.op.runtime_mem:
12195 env["RUNTIME_MEMORY"] = self.op.runtime_mem
12199 def BuildHooksNodes(self):
12200 """Build hooks nodes.
12203 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
12204 return (nl, nl)
12206 def _PrepareNicModification(self, params, private, old_ip, old_params,
12207 cluster, pnode):
12208 update_params_dict = dict([(key, params[key])
12209 for key in constants.NICS_PARAMETERS
12210 if key in params])
12212 if "bridge" in params:
12213 update_params_dict[constants.NIC_LINK] = params["bridge"]
12215 new_params = _GetUpdatedParams(old_params, update_params_dict)
12216 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
12218 new_filled_params = cluster.SimpleFillNIC(new_params)
12219 objects.NIC.CheckParameterSyntax(new_filled_params)
12221 new_mode = new_filled_params[constants.NIC_MODE]
12222 if new_mode == constants.NIC_MODE_BRIDGED:
12223 bridge = new_filled_params[constants.NIC_LINK]
12224 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
12225 if msg:
12226 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
12227 if self.op.force:
12228 self.warn.append(msg)
12229 else:
12230 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
12232 elif new_mode == constants.NIC_MODE_ROUTED:
12233 ip = params.get(constants.INIC_IP, old_ip)
12235 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
12236 " on a routed NIC", errors.ECODE_INVAL)
12238 if constants.INIC_MAC in params:
12239 mac = params[constants.INIC_MAC]
12241 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
12242 errors.ECODE_INVAL)
12243 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
12244 # otherwise generate the MAC address
12245 params[constants.INIC_MAC] = \
12246 self.cfg.GenerateMAC(self.proc.GetECId())
12248 # or validate/reserve the current one
12249 try:
12250 self.cfg.ReserveMAC(mac, self.proc.GetECId())
12251 except errors.ReservationError:
12252 raise errors.OpPrereqError("MAC address '%s' already in use"
12253 " in cluster" % mac,
12254 errors.ECODE_NOTUNIQUE)
12256 private.params = new_params
12257 private.filled = new_filled_params
12259 return (None, None)
12261 def CheckPrereq(self):
12262 """Check prerequisites.
12264 This only checks the instance list against the existing names.
12267 # checking the new params on the primary/secondary nodes
12269 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
12270 cluster = self.cluster = self.cfg.GetClusterInfo()
12271 assert self.instance is not None, \
12272 "Cannot retrieve locked instance %s" % self.op.instance_name
12273 pnode = instance.primary_node
12274 nodelist = list(instance.all_nodes)
12275 pnode_info = self.cfg.GetNodeInfo(pnode)
12276 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
12278 # Prepare disk/NIC modifications
12279 self.diskmod = PrepareContainerMods(self.op.disks, None)
12280 self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
12283 if self.op.os_name and not self.op.force:
12284 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
12285 self.op.force_variant)
12286 instance_os = self.op.os_name
12288 instance_os = instance.os
12290 assert not (self.op.disk_template and self.op.disks), \
12291 "Can't modify disk template and apply disk changes at the same time"
12293 if self.op.disk_template:
12294 if instance.disk_template == self.op.disk_template:
12295 raise errors.OpPrereqError("Instance already has disk template %s" %
12296 instance.disk_template, errors.ECODE_INVAL)
12298 if (instance.disk_template,
12299 self.op.disk_template) not in self._DISK_CONVERSIONS:
12300 raise errors.OpPrereqError("Unsupported disk template conversion from"
12301 " %s to %s" % (instance.disk_template,
12302 self.op.disk_template),
12303 errors.ECODE_INVAL)
12304 _CheckInstanceState(self, instance, INSTANCE_DOWN,
12305 msg="cannot change disk template")
12306 if self.op.disk_template in constants.DTS_INT_MIRROR:
12307 if self.op.remote_node == pnode:
12308 raise errors.OpPrereqError("Given new secondary node %s is the same"
12309 " as the primary node of the instance" %
12310 self.op.remote_node, errors.ECODE_STATE)
12311 _CheckNodeOnline(self, self.op.remote_node)
12312 _CheckNodeNotDrained(self, self.op.remote_node)
12313 # FIXME: here we assume that the old instance type is DT_PLAIN
12314 assert instance.disk_template == constants.DT_PLAIN
12315 disks = [{constants.IDISK_SIZE: d.size,
12316 constants.IDISK_VG: d.logical_id[0]}
12317 for d in instance.disks]
12318 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
12319 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
12321 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
12322 snode_group = self.cfg.GetNodeGroup(snode_info.group)
12323 ipolicy = _CalculateGroupIPolicy(cluster, snode_group)
12324 _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
12325 ignore=self.op.ignore_ipolicy)
12326 if pnode_info.group != snode_info.group:
12327 self.LogWarning("The primary and secondary nodes are in two"
12328 " different node groups; the disk parameters"
12329 " from the first disk's node group will be"
12332 # hvparams processing
12333 if self.op.hvparams:
12334 hv_type = instance.hypervisor
12335 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
12336 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
12337 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
12340 hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
12341 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
12342 self.hv_proposed = self.hv_new = hv_new # the new actual values
12343 self.hv_inst = i_hvdict # the new dict (without defaults)
12344 else:
12345 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
12346 instance.hvparams)
12347 self.hv_new = self.hv_inst = {}
12349 # beparams processing
12350 if self.op.beparams:
12351 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
12353 objects.UpgradeBeParams(i_bedict)
12354 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
12355 be_new = cluster.SimpleFillBE(i_bedict)
12356 self.be_proposed = self.be_new = be_new # the new actual values
12357 self.be_inst = i_bedict # the new dict (without defaults)
12358 else:
12359 self.be_new = self.be_inst = {}
12360 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
12361 be_old = cluster.FillBE(instance)
12363 # CPU param validation -- checking every time a parameter is
12364 # changed to cover all cases where either CPU mask or vcpus have
12365 # been changed
12366 if (constants.BE_VCPUS in self.be_proposed and
12367 constants.HV_CPU_MASK in self.hv_proposed):
12368 cpu_list = \
12369 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
12370 # Verify mask is consistent with number of vCPUs. Can skip this
12371 # test if only 1 entry in the CPU mask, which means same mask
12372 # is applied to all vCPUs.
12373 if (len(cpu_list) > 1 and
12374 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
12375 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
12376 " CPU mask [%s]" %
12377 (self.be_proposed[constants.BE_VCPUS],
12378 self.hv_proposed[constants.HV_CPU_MASK]),
12379 errors.ECODE_INVAL)
12381 # Only perform this test if a new CPU mask is given
12382 if constants.HV_CPU_MASK in self.hv_new:
12383 # Calculate the largest CPU number requested
12384 max_requested_cpu = max(map(max, cpu_list))
12385 # Check that all of the instance's nodes have enough physical CPUs to
12386 # satisfy the requested CPU mask
12387 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
12388 max_requested_cpu + 1, instance.hypervisor)
12390 # osparams processing
12391 if self.op.osparams:
12392 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
12393 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
12394 self.os_inst = i_osdict # the new dict (without defaults)
12395 else:
12396 self.os_inst = {}
12398 self.warn = []
12400 #TODO(dynmem): do the appropriate check involving MINMEM
12401 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
12402 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
12403 mem_check_list = [pnode]
12404 if be_new[constants.BE_AUTO_BALANCE]:
12405 # either we changed auto_balance to yes or it was from before
12406 mem_check_list.extend(instance.secondary_nodes)
12407 instance_info = self.rpc.call_instance_info(pnode, instance.name,
12408 instance.hypervisor)
12409 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
12410 [instance.hypervisor])
12411 pninfo = nodeinfo[pnode]
12412 msg = pninfo.fail_msg
12413 if msg:
12414 # Assume the primary node is unreachable and go ahead
12415 self.warn.append("Can't get info from primary node %s: %s" %
12416 (pnode, msg))
12417 else:
12418 (_, _, (pnhvinfo, )) = pninfo.payload
12419 if not isinstance(pnhvinfo.get("memory_free", None), int):
12420 self.warn.append("Node data from primary node %s doesn't contain"
12421 " free memory information" % pnode)
12422 elif instance_info.fail_msg:
12423 self.warn.append("Can't get instance runtime information: %s" %
12424 instance_info.fail_msg)
12426 if instance_info.payload:
12427 current_mem = int(instance_info.payload["memory"])
12429 # Assume instance not running
12430 # (there is a slight race condition here, but it's not very
12431 # probable, and we have no other way to check)
12432 # TODO: Describe race condition
12434 #TODO(dynmem): do the appropriate check involving MINMEM
12435 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
12436 pnhvinfo["memory_free"])
12438 raise errors.OpPrereqError("This change will prevent the instance"
12439 " from starting, due to %d MB of memory"
12440 " missing on its primary node" %
12442 errors.ECODE_NORES)
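# Worked example (illustrative numbers): raising maxmem to 4096 MB while the
# instance currently uses 1024 MB and the primary node reports 2048 MB free
# gives miss_mem = 4096 - 1024 - 2048 = 1024 > 0, so the change is refused
# unless the force flag was given.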
12444 if be_new[constants.BE_AUTO_BALANCE]:
12445 for node, nres in nodeinfo.items():
12446 if node not in instance.secondary_nodes:
12448 nres.Raise("Can't get info from secondary node %s" % node,
12449 prereq=True, ecode=errors.ECODE_STATE)
12450 (_, _, (nhvinfo, )) = nres.payload
12451 if not isinstance(nhvinfo.get("memory_free", None), int):
12452 raise errors.OpPrereqError("Secondary node %s didn't return free"
12453 " memory information" % node,
12454 errors.ECODE_STATE)
12455 #TODO(dynmem): do the appropriate check involving MINMEM
12456 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
12457 raise errors.OpPrereqError("This change will prevent the instance"
12458 " from failover to its secondary node"
12459 " %s, due to not enough memory" % node,
12460 errors.ECODE_STATE)
12462 if self.op.runtime_mem:
12463 remote_info = self.rpc.call_instance_info(instance.primary_node,
12464 instance.name,
12465 instance.hypervisor)
12466 remote_info.Raise("Error checking node %s" % instance.primary_node)
12467 if not remote_info.payload: # not running already
12468 raise errors.OpPrereqError("Instance %s is not running" % instance.name,
12469 errors.ECODE_STATE)
12471 current_memory = remote_info.payload["memory"]
12472 if (not self.op.force and
12473 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
12474 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
12475 raise errors.OpPrereqError("Instance %s must have memory between %d"
12476 " and %d MB of memory unless --force is"
12477 " given" % (instance.name,
12478 self.be_proposed[constants.BE_MINMEM],
12479 self.be_proposed[constants.BE_MAXMEM]),
12480 errors.ECODE_INVAL)
12482 if self.op.runtime_mem > current_memory:
12483 _CheckNodeFreeMemory(self, instance.primary_node,
12484 "ballooning memory for instance %s" %
12486 self.op.runtime_mem - current_memory,
12487 instance.hypervisor)
12489 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
12490 raise errors.OpPrereqError("Disk operations not supported for"
12491 " diskless instances",
12492 errors.ECODE_INVAL)
12494 def _PrepareNicCreate(_, params, private):
12495 return self._PrepareNicModification(params, private, None, {},
12496 cluster, pnode)
12498 def _PrepareNicMod(_, nic, params, private):
12499 return self._PrepareNicModification(params, private, nic.ip,
12500 nic.nicparams, cluster, pnode)
12502 # Verify NIC changes (operating on copy)
12503 nics = instance.nics[:]
12504 ApplyContainerMods("NIC", nics, None, self.nicmod,
12505 _PrepareNicCreate, _PrepareNicMod, None)
12506 if len(nics) > constants.MAX_NICS:
12507 raise errors.OpPrereqError("Instance has too many network interfaces"
12508 " (%d), cannot add more" % constants.MAX_NICS,
12509 errors.ECODE_STATE)
12511 # Verify disk changes (operating on a copy)
12512 disks = instance.disks[:]
12513 ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
12514 if len(disks) > constants.MAX_DISKS:
12515 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
12516 " more" % constants.MAX_DISKS,
12517 errors.ECODE_STATE)
12519 if self.op.offline is not None:
12520 if self.op.offline:
12521 msg = "can't change to offline"
12523 msg = "can't change to online"
12524 _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
12526 # Pre-compute NIC changes (necessary to use result in hooks)
12527 self._nic_chgdesc = []
12528 if self.nicmod:
12529 # Operate on copies as this is still in prereq
12530 nics = [nic.Copy() for nic in instance.nics]
12531 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
12532 self._CreateNewNic, self._ApplyNicMods, None)
12533 self._new_nics = nics
12534 else:
12535 self._new_nics = None
12537 def _ConvertPlainToDrbd(self, feedback_fn):
12538 """Converts an instance from plain to drbd.
12541 feedback_fn("Converting template to drbd")
12542 instance = self.instance
12543 pnode = instance.primary_node
12544 snode = self.op.remote_node
12546 assert instance.disk_template == constants.DT_PLAIN
12548 # create a fake disk info for _GenerateDiskTemplate
12549 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
12550 constants.IDISK_VG: d.logical_id[0]}
12551 for d in instance.disks]
12552 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
12553 instance.name, pnode, [snode],
12554 disk_info, None, None, 0, feedback_fn,
12555 self.diskparams)
12556 info = _GetInstanceInfoText(instance)
12557 feedback_fn("Creating additional volumes...")
12558 # first, create the missing data and meta devices
12559 for disk in new_disks:
12560 # unfortunately this is... not too nice
12561 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
12562 info, True)
12563 for child in disk.children:
12564 _CreateSingleBlockDev(self, snode, instance, child, info, True)
12565 # at this stage, all new LVs have been created, we can rename the
12567 feedback_fn("Renaming original volumes...")
12568 rename_list = [(o, n.children[0].logical_id)
12569 for (o, n) in zip(instance.disks, new_disks)]
12570 result = self.rpc.call_blockdev_rename(pnode, rename_list)
12571 result.Raise("Failed to rename original LVs")
12573 feedback_fn("Initializing DRBD devices...")
12574 # all child devices are in place, we can now create the DRBD devices
12575 for disk in new_disks:
12576 for node in [pnode, snode]:
12577 f_create = node == pnode
12578 _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
12580 # at this point, the instance has been modified
12581 instance.disk_template = constants.DT_DRBD8
12582 instance.disks = new_disks
12583 self.cfg.Update(instance, feedback_fn)
12585 # Release node locks while waiting for sync
12586 _ReleaseLocks(self, locking.LEVEL_NODE)
12588 # disks are created, waiting for sync
12589 disk_abort = not _WaitForSync(self, instance,
12590 oneshot=not self.op.wait_for_sync)
12592 raise errors.OpExecError("There are some degraded disks for"
12593 " this instance, please cleanup manually")
12595 # Node resource locks will be released by caller
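# Descriptive summary of the conversion above: each existing plain LV becomes
# the data child (children[0]) of a new DRBD8 disk, the missing metadata LV
# (children[1]) and the secondary's copies are created, the original LVs are
# renamed to the names the new template expects, and the DRBD devices
# themselves are then assembled on both nodes before the configuration is
# updated and the mirror is left to sync.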
12597 def _ConvertDrbdToPlain(self, feedback_fn):
12598 """Converts an instance from drbd to plain.
12601 instance = self.instance
12603 assert len(instance.secondary_nodes) == 1
12604 assert instance.disk_template == constants.DT_DRBD8
12606 pnode = instance.primary_node
12607 snode = instance.secondary_nodes[0]
12608 feedback_fn("Converting template to plain")
12610 old_disks = instance.disks
12611 new_disks = [d.children[0] for d in old_disks]
12613 # copy over size and mode
12614 for parent, child in zip(old_disks, new_disks):
12615 child.size = parent.size
12616 child.mode = parent.mode
12618 # this is a DRBD disk, return its port to the pool
12619 # NOTE: this must be done right before the call to cfg.Update!
12620 for disk in old_disks:
12621 tcp_port = disk.logical_id[2]
12622 self.cfg.AddTcpUdpPort(tcp_port)
12624 # update instance structure
12625 instance.disks = new_disks
12626 instance.disk_template = constants.DT_PLAIN
12627 self.cfg.Update(instance, feedback_fn)
12629 # Release locks in case removing disks takes a while
12630 _ReleaseLocks(self, locking.LEVEL_NODE)
12632 feedback_fn("Removing volumes on the secondary node...")
12633 for disk in old_disks:
12634 self.cfg.SetDiskID(disk, snode)
12635 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
12637 self.LogWarning("Could not remove block device %s on node %s,"
12638 " continuing anyway: %s", disk.iv_name, snode, msg)
12640 feedback_fn("Removing unneeded volumes on the primary node...")
12641 for idx, disk in enumerate(old_disks):
12642 meta = disk.children[1]
12643 self.cfg.SetDiskID(meta, pnode)
12644 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
12646 self.LogWarning("Could not remove metadata for disk %d on node %s,"
12647 " continuing anyway: %s", idx, pnode, msg)
12649 def _CreateNewDisk(self, idx, params, _):
12650 """Creates a new disk.
12653 instance = self.instance
12656 if instance.disk_template in constants.DTS_FILEBASED:
12657 (file_driver, file_path) = instance.disks[0].logical_id
12658 file_path = os.path.dirname(file_path)
12659 else:
12660 file_driver = file_path = None
12662 disk = \
12663 _GenerateDiskTemplate(self, instance.disk_template, instance.name,
12664 instance.primary_node, instance.secondary_nodes,
12665 [params], file_path, file_driver, idx,
12666 self.Log, self.diskparams)[0]
12668 info = _GetInstanceInfoText(instance)
12670 logging.info("Creating volume %s for instance %s",
12671 disk.iv_name, instance.name)
12672 # Note: this needs to be kept in sync with _CreateDisks
12674 for node in instance.all_nodes:
12675 f_create = (node == instance.primary_node)
12676 try:
12677 _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
12678 except errors.OpExecError, err:
12679 self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
12680 disk.iv_name, disk, node, err)
12683 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
12687 def _ModifyDisk(idx, disk, params, _):
12688 """Modifies a disk.
12691 disk.mode = params[constants.IDISK_MODE]
12694 ("disk.mode/%d" % idx, disk.mode),
12697 def _RemoveDisk(self, idx, root, _):
12701 for node, disk in root.ComputeNodeTree(self.instance.primary_node):
12702 self.cfg.SetDiskID(disk, node)
12703 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
12705 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
12706 " continuing anyway", idx, node, msg)
12708 # if this is a DRBD disk, return its port to the pool
12709 if root.dev_type in constants.LDS_DRBD:
12710 self.cfg.AddTcpUdpPort(root.logical_id[2])
12713 def _CreateNewNic(idx, params, private):
12714 """Creates data structure for a new network interface.
12717 mac = params[constants.INIC_MAC]
12718 ip = params.get(constants.INIC_IP, None)
12719 nicparams = private.params
12721 return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
12722 ("nic.%d" % idx,
12723 "add:mac=%s,ip=%s,mode=%s,link=%s" %
12724 (mac, ip, private.filled[constants.NIC_MODE],
12725 private.filled[constants.NIC_LINK])),
12726 ])
12728 @staticmethod
12729 def _ApplyNicMods(idx, nic, params, private):
12730 """Modifies a network interface.
12735 for key in [constants.INIC_MAC, constants.INIC_IP]:
12737 changes.append(("nic.%s/%d" % (key, idx), params[key]))
12738 setattr(nic, key, params[key])
12741 nic.nicparams = private.params
12743 for (key, val) in params.items():
12744 changes.append(("nic.%s/%d" % (key, idx), val))
12748 def Exec(self, feedback_fn):
12749 """Modifies an instance.
12751 All parameters take effect only at the next restart of the instance.
12754 # Process here the warnings from CheckPrereq, as we don't have a
12755 # feedback_fn there.
12756 # TODO: Replace with self.LogWarning
12757 for warn in self.warn:
12758 feedback_fn("WARNING: %s" % warn)
12760 assert ((self.op.disk_template is None) ^
12761 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
12762 "Not owning any node resource locks"
12764 result = []
12765 instance = self.instance
12768 if self.op.runtime_mem:
12769 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
12770 instance,
12771 self.op.runtime_mem)
12772 rpcres.Raise("Cannot modify instance runtime memory")
12773 result.append(("runtime_memory", self.op.runtime_mem))
12775 # Apply disk changes
12776 ApplyContainerMods("disk", instance.disks, result, self.diskmod,
12777 self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
12778 _UpdateIvNames(0, instance.disks)
12780 if self.op.disk_template:
12782 check_nodes = set(instance.all_nodes)
12783 if self.op.remote_node:
12784 check_nodes.add(self.op.remote_node)
12785 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
12786 owned = self.owned_locks(level)
12787 assert not (check_nodes - owned), \
12788 ("Not owning the correct locks, owning %r, expected at least %r" %
12789 (owned, check_nodes))
12791 r_shut = _ShutdownInstanceDisks(self, instance)
12792 if not r_shut:
12793 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
12794 " proceed with disk template conversion")
12795 mode = (instance.disk_template, self.op.disk_template)
12796 try:
12797 self._DISK_CONVERSIONS[mode](self, feedback_fn)
12798 finally:
12799 self.cfg.ReleaseDRBDMinors(instance.name)
12801 result.append(("disk_template", self.op.disk_template))
12803 assert instance.disk_template == self.op.disk_template, \
12804 ("Expected disk template '%s', found '%s'" %
12805 (self.op.disk_template, instance.disk_template))
12807 # Release node and resource locks if there are any (they might already have
12808 # been released during disk conversion)
12809 _ReleaseLocks(self, locking.LEVEL_NODE)
12810 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
12812 # Apply NIC changes
12813 if self._new_nics is not None:
12814 instance.nics = self._new_nics
12815 result.extend(self._nic_chgdesc)
12818 if self.op.hvparams:
12819 instance.hvparams = self.hv_inst
12820 for key, val in self.op.hvparams.iteritems():
12821 result.append(("hv/%s" % key, val))
12824 if self.op.beparams:
12825 instance.beparams = self.be_inst
12826 for key, val in self.op.beparams.iteritems():
12827 result.append(("be/%s" % key, val))
12830 if self.op.os_name:
12831 instance.os = self.op.os_name
12834 if self.op.osparams:
12835 instance.osparams = self.os_inst
12836 for key, val in self.op.osparams.iteritems():
12837 result.append(("os/%s" % key, val))
12839 if self.op.offline is None:
12840 # Ignore
12841 pass
12842 elif self.op.offline:
12843 # Mark instance as offline
12844 self.cfg.MarkInstanceOffline(instance.name)
12845 result.append(("admin_state", constants.ADMINST_OFFLINE))
12847 # Mark instance as online, but stopped
12848 self.cfg.MarkInstanceDown(instance.name)
12849 result.append(("admin_state", constants.ADMINST_DOWN))
12851 self.cfg.Update(instance, feedback_fn)
12853 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
12854 self.owned_locks(locking.LEVEL_NODE)), \
12855 "All node locks should have been released by now"
12859 _DISK_CONVERSIONS = {
12860 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
12861 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
12862 }
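# A short illustration (not part of the original code): the Exec method above
# resolves the requested conversion through this table, e.g. for a DRBD8 ->
# plain conversion:
#
#   mode = (constants.DT_DRBD8, constants.DT_PLAIN)
#   self._DISK_CONVERSIONS[mode](self, feedback_fn)   # calls _ConvertDrbdToPlain
#
# Template pairs without an entry here are assumed to be rejected before Exec
# runs, so the lookup itself does not need a fallback.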
12865 class LUInstanceChangeGroup(LogicalUnit):
12866 HPATH = "instance-change-group"
12867 HTYPE = constants.HTYPE_INSTANCE
12870 def ExpandNames(self):
12871 self.share_locks = _ShareAll()
12872 self.needed_locks = {
12873 locking.LEVEL_NODEGROUP: [],
12874 locking.LEVEL_NODE: [],
12875 }
12877 self._ExpandAndLockInstance()
12879 if self.op.target_groups:
12880 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
12881 self.op.target_groups)
12882 else:
12883 self.req_target_uuids = None
12885 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
12887 def DeclareLocks(self, level):
12888 if level == locking.LEVEL_NODEGROUP:
12889 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
12891 if self.req_target_uuids:
12892 lock_groups = set(self.req_target_uuids)
12894 # Lock all groups used by instance optimistically; this requires going
12895 # via the node before it's locked, requiring verification later on
12896 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
12897 lock_groups.update(instance_groups)
12898 else:
12899 # No target groups, need to lock all of them
12900 lock_groups = locking.ALL_SET
12902 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
12904 elif level == locking.LEVEL_NODE:
12905 if self.req_target_uuids:
12906 # Lock all nodes used by instances
12907 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
12908 self._LockInstancesNodes()
12910 # Lock all nodes in all potential target groups
12911 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
12912 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
12913 member_nodes = [node_name
12914 for group in lock_groups
12915 for node_name in self.cfg.GetNodeGroup(group).members]
12916 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
12917 else:
12918 # Lock all nodes as all groups are potential targets
12919 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
12921 def CheckPrereq(self):
12922 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
12923 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
12924 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
12926 assert (self.req_target_uuids is None or
12927 owned_groups.issuperset(self.req_target_uuids))
12928 assert owned_instances == set([self.op.instance_name])
12930 # Get instance information
12931 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
12933 # Check if node groups for locked instance are still correct
12934 assert owned_nodes.issuperset(self.instance.all_nodes), \
12935 ("Instance %s's nodes changed while we kept the lock" %
12936 self.op.instance_name)
12938 inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
12939 owned_groups)
12941 if self.req_target_uuids:
12942 # User requested specific target groups
12943 self.target_uuids = frozenset(self.req_target_uuids)
12944 else:
12945 # All groups except those used by the instance are potential targets
12946 self.target_uuids = owned_groups - inst_groups
12948 conflicting_groups = self.target_uuids & inst_groups
12949 if conflicting_groups:
12950 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
12951 " used by the instance '%s'" %
12952 (utils.CommaJoin(conflicting_groups),
12953 self.op.instance_name),
12954 errors.ECODE_INVAL)
12956 if not self.target_uuids:
12957 raise errors.OpPrereqError("There are no possible target groups",
12958 errors.ECODE_INVAL)
12960 def BuildHooksEnv(self):
12961 """Build hooks env.
12964 assert self.target_uuids
12967 "TARGET_GROUPS": " ".join(self.target_uuids),
12970 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
12974 def BuildHooksNodes(self):
12975 """Build hooks nodes.
12978 mn = self.cfg.GetMasterNode()
12979 return ([mn], [mn])
12981 def Exec(self, feedback_fn):
12982 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
12984 assert instances == [self.op.instance_name], "Instance not locked"
12986 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
12987 instances=instances, target_groups=list(self.target_uuids))
12989 ial.Run(self.op.iallocator)
12991 if not ial.success:
12992 raise errors.OpPrereqError("Can't compute solution for changing group of"
12993 " instance '%s' using iallocator '%s': %s" %
12994 (self.op.instance_name, self.op.iallocator,
12995 ial.info),
12996 errors.ECODE_NORES)
12998 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
13000 self.LogInfo("Iallocator returned %s job(s) for changing group of"
13001 " instance '%s'", len(jobs), self.op.instance_name)
13003 return ResultWithJobs(jobs)
13006 class LUBackupQuery(NoHooksLU):
13007 """Query the exports list
13012 def CheckArguments(self):
13013 self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
13014 ["node", "export"], self.op.use_locking)
13016 def ExpandNames(self):
13017 self.expq.ExpandNames(self)
13019 def DeclareLocks(self, level):
13020 self.expq.DeclareLocks(self, level)
13022 def Exec(self, feedback_fn):
13024 result = {}
13025 for (node, expname) in self.expq.OldStyleQuery(self):
13026 if expname is None:
13027 result[node] = False
13028 else:
13029 result.setdefault(node, []).append(expname)
13031 return result
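# Example of the structure returned by Exec above (hypothetical node and
# export names, for illustration only): a node whose export list could not be
# retrieved maps to False, otherwise to the list of export names found on it:
#
#   {
#     "node1.example.com": ["instance1.example.com"],
#     "node2.example.com": [],
#     "node3.example.com": False,   # query to this node failed
#   }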
13034 class _ExportQuery(_QueryBase):
13035 FIELDS = query.EXPORT_FIELDS
13037 #: The node name is not a unique key for this query
13038 SORT_FIELD = "node"
13040 def ExpandNames(self, lu):
13041 lu.needed_locks = {}
13043 # The following variables interact with _QueryBase._GetNames
13044 if self.names:
13045 self.wanted = _GetWantedNodes(lu, self.names)
13046 else:
13047 self.wanted = locking.ALL_SET
13049 self.do_locking = self.use_locking
13051 if self.do_locking:
13052 lu.share_locks = _ShareAll()
13053 lu.needed_locks = {
13054 locking.LEVEL_NODE: self.wanted,
13055 }
13057 def DeclareLocks(self, lu, level):
13058 pass
13060 def _GetQueryData(self, lu):
13061 """Computes the list of nodes and their attributes.
13064 # Locking is not used
13066 assert not (compat.any(lu.glm.is_owned(level)
13067 for level in locking.LEVELS
13068 if level != locking.LEVEL_CLUSTER) or
13069 self.do_locking or self.use_locking)
13071 nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
13073 result = []
13075 for (node, nres) in lu.rpc.call_export_list(nodes).items():
13076 if nres.fail_msg:
13077 result.append((node, None))
13078 else:
13079 result.extend((node, expname) for expname in nres.payload)
13081 return result
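# The list built above pairs each queried node with either an export name or
# None; with hypothetical names it would look like this (None marks a node
# whose export-list RPC failed):
#
#   [
#     ("node1.example.com", "instance1.example.com"),
#     ("node2.example.com", "instance2.example.com"),
#     ("node3.example.com", None),
#   ]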
13084 class LUBackupPrepare(NoHooksLU):
13085 """Prepares an instance for an export and returns useful information.
13090 def ExpandNames(self):
13091 self._ExpandAndLockInstance()
13093 def CheckPrereq(self):
13094 """Check prerequisites.
13097 instance_name = self.op.instance_name
13099 self.instance = self.cfg.GetInstanceInfo(instance_name)
13100 assert self.instance is not None, \
13101 "Cannot retrieve locked instance %s" % self.op.instance_name
13102 _CheckNodeOnline(self, self.instance.primary_node)
13104 self._cds = _GetClusterDomainSecret()
13106 def Exec(self, feedback_fn):
13107 """Prepares an instance for an export.
13110 instance = self.instance
13112 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13113 salt = utils.GenerateSecret(8)
13115 feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
13116 result = self.rpc.call_x509_cert_create(instance.primary_node,
13117 constants.RIE_CERT_VALIDITY)
13118 result.Raise("Can't create X509 key and certificate on %s" % result.node)
13120 (name, cert_pem) = result.payload
13122 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
13123 cert_pem)
13125 return {
13126 "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
13127 "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
13128 salt),
13129 "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
13130 }
13132 return None
13135 class LUBackupExport(LogicalUnit):
13136 """Export an instance to an image in the cluster.
13139 HPATH = "instance-export"
13140 HTYPE = constants.HTYPE_INSTANCE
13143 def CheckArguments(self):
13144 """Check the arguments.
13147 self.x509_key_name = self.op.x509_key_name
13148 self.dest_x509_ca_pem = self.op.destination_x509_ca
13150 if self.op.mode == constants.EXPORT_MODE_REMOTE:
13151 if not self.x509_key_name:
13152 raise errors.OpPrereqError("Missing X509 key name for encryption",
13153 errors.ECODE_INVAL)
13155 if not self.dest_x509_ca_pem:
13156 raise errors.OpPrereqError("Missing destination X509 CA",
13157 errors.ECODE_INVAL)
13159 def ExpandNames(self):
13160 self._ExpandAndLockInstance()
13162 # Lock all nodes for local exports
13163 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13164 # FIXME: lock only instance primary and destination node
13166 # Sad but true, for now we have to lock all nodes, as we don't know where
13167 # the previous export might be, and in this LU we search for it and
13168 # remove it from its current node. In the future we could fix this by:
13169 # - making a tasklet to search (share-lock all), then create the
13170 # new one, then one to remove, after
13171 # - removing the removal operation altogether
13172 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13174 def DeclareLocks(self, level):
13175 """Last minute lock declaration."""
13176 # All nodes are locked anyway, so nothing to do here.
13178 def BuildHooksEnv(self):
13179 """Build hooks env.
13181 This will run on the master, primary node and target node.
13183 """
13184 env = {
13185 "EXPORT_MODE": self.op.mode,
13186 "EXPORT_NODE": self.op.target_node,
13187 "EXPORT_DO_SHUTDOWN": self.op.shutdown,
13188 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
13189 # TODO: Generic function for boolean env variables
13190 "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
13193 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
13197 def BuildHooksNodes(self):
13198 """Build hooks nodes.
13201 nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
13203 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13204 nl.append(self.op.target_node)
13206 return (nl, nl)
13208 def CheckPrereq(self):
13209 """Check prerequisites.
13211 This checks that the instance and node names are valid.
13214 instance_name = self.op.instance_name
13216 self.instance = self.cfg.GetInstanceInfo(instance_name)
13217 assert self.instance is not None, \
13218 "Cannot retrieve locked instance %s" % self.op.instance_name
13219 _CheckNodeOnline(self, self.instance.primary_node)
13221 if (self.op.remove_instance and
13222 self.instance.admin_state == constants.ADMINST_UP and
13223 not self.op.shutdown):
13224 raise errors.OpPrereqError("Can not remove instance without shutting it"
13227 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13228 self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
13229 self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
13230 assert self.dst_node is not None
13232 _CheckNodeOnline(self, self.dst_node.name)
13233 _CheckNodeNotDrained(self, self.dst_node.name)
13236 self.dest_disk_info = None
13237 self.dest_x509_ca = None
13239 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13240 self.dst_node = None
13242 if len(self.op.target_node) != len(self.instance.disks):
13243 raise errors.OpPrereqError(("Received destination information for %s"
13244 " disks, but instance %s has %s disks") %
13245 (len(self.op.target_node), instance_name,
13246 len(self.instance.disks)),
13247 errors.ECODE_INVAL)
13249 cds = _GetClusterDomainSecret()
13251 # Check X509 key name
13252 try:
13253 (key_name, hmac_digest, hmac_salt) = self.x509_key_name
13254 except (TypeError, ValueError), err:
13255 raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
13257 if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
13258 raise errors.OpPrereqError("HMAC for X509 key name is wrong",
13259 errors.ECODE_INVAL)
13261 # Load and verify CA
13262 try:
13263 (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
13264 except OpenSSL.crypto.Error, err:
13265 raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
13266 (err, ), errors.ECODE_INVAL)
13268 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
13269 if errcode is not None:
13270 raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
13271 (msg, ), errors.ECODE_INVAL)
13273 self.dest_x509_ca = cert
13275 # Verify target information
13276 disk_info = []
13277 for idx, disk_data in enumerate(self.op.target_node):
13278 try:
13279 (host, port, magic) = \
13280 masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
13281 except errors.GenericError, err:
13282 raise errors.OpPrereqError("Target info for disk %s: %s" %
13283 (idx, err), errors.ECODE_INVAL)
13285 disk_info.append((host, port, magic))
13287 assert len(disk_info) == len(self.op.target_node)
13288 self.dest_disk_info = disk_info
13291 raise errors.ProgrammerError("Unhandled export mode %r" %
13294 # instance disk type verification
13295 # TODO: Implement export support for file-based disks
13296 for disk in self.instance.disks:
13297 if disk.dev_type == constants.LD_FILE:
13298 raise errors.OpPrereqError("Export not supported for instances with"
13299 " file-based disks", errors.ECODE_INVAL)
13301 def _CleanupExports(self, feedback_fn):
13302 """Removes exports of current instance from all other nodes.
13304 If an instance in a cluster with nodes A..D was exported to node C, its
13305 exports will be removed from the nodes A, B and D.
13308 assert self.op.mode != constants.EXPORT_MODE_REMOTE
13310 nodelist = self.cfg.GetNodeList()
13311 nodelist.remove(self.dst_node.name)
13313 # on one-node clusters nodelist will be empty after the removal
13314 # if we proceed the backup would be removed because OpBackupQuery
13315 # substitutes an empty list with the full cluster node list.
13316 iname = self.instance.name
13317 if nodelist:
13318 feedback_fn("Removing old exports for instance %s" % iname)
13319 exportlist = self.rpc.call_export_list(nodelist)
13320 for node in exportlist:
13321 if exportlist[node].fail_msg:
13322 continue
13323 if iname in exportlist[node].payload:
13324 msg = self.rpc.call_export_remove(node, iname).fail_msg
13325 if msg:
13326 self.LogWarning("Could not remove older export for instance %s"
13327 " on node %s: %s", iname, node, msg)
13329 def Exec(self, feedback_fn):
13330 """Export an instance to an image in the cluster.
13333 assert self.op.mode in constants.EXPORT_MODES
13335 instance = self.instance
13336 src_node = instance.primary_node
13338 if self.op.shutdown:
13339 # shutdown the instance, but not the disks
13340 feedback_fn("Shutting down instance %s" % instance.name)
13341 result = self.rpc.call_instance_shutdown(src_node, instance,
13342 self.op.shutdown_timeout)
13343 # TODO: Maybe ignore failures if ignore_remove_failures is set
13344 result.Raise("Could not shutdown instance %s on"
13345 " node %s" % (instance.name, src_node))
13347 # set the disks ID correctly since call_instance_start needs the
13348 # correct drbd minor to create the symlinks
13349 for disk in instance.disks:
13350 self.cfg.SetDiskID(disk, src_node)
13352 activate_disks = (instance.admin_state != constants.ADMINST_UP)
13354 if activate_disks:
13355 # Activate the instance disks if we're exporting a stopped instance
13356 feedback_fn("Activating disks for %s" % instance.name)
13357 _StartInstanceDisks(self, instance, None)
13359 try:
13360 helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
13361 instance)
13363 helper.CreateSnapshots()
13364 try:
13365 if (self.op.shutdown and
13366 instance.admin_state == constants.ADMINST_UP and
13367 not self.op.remove_instance):
13368 assert not activate_disks
13369 feedback_fn("Starting instance %s" % instance.name)
13370 result = self.rpc.call_instance_start(src_node,
13371 (instance, None, None), False)
13372 msg = result.fail_msg
13373 if msg:
13374 feedback_fn("Failed to start instance: %s" % msg)
13375 _ShutdownInstanceDisks(self, instance)
13376 raise errors.OpExecError("Could not start instance: %s" % msg)
13378 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13379 (fin_resu, dresults) = helper.LocalExport(self.dst_node)
13380 elif self.op.mode == constants.EXPORT_MODE_REMOTE:
13381 connect_timeout = constants.RIE_CONNECT_TIMEOUT
13382 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
13384 (key_name, _, _) = self.x509_key_name
13386 dest_ca_pem = \
13387 OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
13388 self.dest_x509_ca)
13390 (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
13391 key_name, dest_ca_pem,
13392 timeouts)
13393 finally:
13394 helper.Cleanup()
13396 # Check for backwards compatibility
13397 assert len(dresults) == len(instance.disks)
13398 assert compat.all(isinstance(i, bool) for i in dresults), \
13399 "Not all results are boolean: %r" % dresults
13403 feedback_fn("Deactivating disks for %s" % instance.name)
13404 _ShutdownInstanceDisks(self, instance)
13406 if not (compat.all(dresults) and fin_resu):
13407 failures = []
13408 if not fin_resu:
13409 failures.append("export finalization")
13410 if not compat.all(dresults):
13411 fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
13412 if not dsk)
13413 failures.append("disk export: disk(s) %s" % fdsk)
13415 raise errors.OpExecError("Export failed, errors in %s" %
13416 utils.CommaJoin(failures))
13418 # At this point, the export was successful, we can cleanup/finish
13420 # Remove instance if requested
13421 if self.op.remove_instance:
13422 feedback_fn("Removing instance %s" % instance.name)
13423 _RemoveInstance(self, feedback_fn, instance,
13424 self.op.ignore_remove_failures)
13426 if self.op.mode == constants.EXPORT_MODE_LOCAL:
13427 self._CleanupExports(feedback_fn)
13429 return fin_resu, dresults
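# Interpretation of the (fin_resu, dresults) value returned above, using a
# hypothetical two-disk instance: fin_resu is the overall finalization status
# and dresults carries one boolean per disk, in disk order.
#
#   (fin_resu, dresults) = (True, [True, True])
#   # both disks exported and finalization succeeded; a False entry or a
#   # failed finalization raises OpExecError above instead of returning.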
13432 class LUBackupRemove(NoHooksLU):
13433 """Remove exports related to the named instance.
13438 def ExpandNames(self):
13439 self.needed_locks = {}
13440 # We need all nodes to be locked in order for RemoveExport to work, but we
13441 # don't need to lock the instance itself, as nothing will happen to it (and
13442 # we can remove exports also for a removed instance)
13443 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
13445 def Exec(self, feedback_fn):
13446 """Remove any export.
13449 instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
13450 # If the instance was not found we'll try with the name that was passed in.
13451 # This will only work if it was an FQDN, though.
13453 if not instance_name:
13455 instance_name = self.op.instance_name
13457 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
13458 exportlist = self.rpc.call_export_list(locked_nodes)
13460 for node in exportlist:
13461 msg = exportlist[node].fail_msg
13463 self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
13465 if instance_name in exportlist[node].payload:
13467 result = self.rpc.call_export_remove(node, instance_name)
13468 msg = result.fail_msg
13470 logging.error("Could not remove export for instance %s"
13471 " on node %s: %s", instance_name, node, msg)
13473 if fqdn_warn and not found:
13474 feedback_fn("Export not found. If trying to remove an export belonging"
13475 " to a deleted instance please use its Fully Qualified"
13479 class LUGroupAdd(LogicalUnit):
13480 """Logical unit for creating node groups.
13483 HPATH = "group-add"
13484 HTYPE = constants.HTYPE_GROUP
13487 def ExpandNames(self):
13488 # We need the new group's UUID here so that we can create and acquire the
13489 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
13490 # that it should not check whether the UUID exists in the configuration.
13491 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
13492 self.needed_locks = {}
13493 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
13495 def CheckPrereq(self):
13496 """Check prerequisites.
13498 This checks that the given group name is not an existing node group
13499 already.
13501 """
13502 try:
13503 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13504 except errors.OpPrereqError:
13505 pass
13506 else:
13507 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
13508 " node group (UUID: %s)" %
13509 (self.op.group_name, existing_uuid),
13510 errors.ECODE_EXISTS)
13512 if self.op.ndparams:
13513 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
13515 if self.op.hv_state:
13516 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
13517 else:
13518 self.new_hv_state = None
13520 if self.op.disk_state:
13521 self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
13522 else:
13523 self.new_disk_state = None
13525 if self.op.diskparams:
13526 for templ in constants.DISK_TEMPLATES:
13527 if templ not in self.op.diskparams:
13528 self.op.diskparams[templ] = {}
13529 utils.ForceDictType(self.op.diskparams[templ], constants.DISK_DT_TYPES)
13530 else:
13531 self.op.diskparams = self.cfg.GetClusterInfo().diskparams
13533 if self.op.ipolicy:
13534 cluster = self.cfg.GetClusterInfo()
13535 full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
13537 objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
13538 except errors.ConfigurationError, err:
13539 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
13540 errors.ECODE_INVAL)
13542 def BuildHooksEnv(self):
13543 """Build hooks env.
13547 "GROUP_NAME": self.op.group_name,
13550 def BuildHooksNodes(self):
13551 """Build hooks nodes.
13554 mn = self.cfg.GetMasterNode()
13555 return ([mn], [mn])
13557 def Exec(self, feedback_fn):
13558 """Add the node group to the cluster.
13561 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
13562 uuid=self.group_uuid,
13563 alloc_policy=self.op.alloc_policy,
13564 ndparams=self.op.ndparams,
13565 diskparams=self.op.diskparams,
13566 ipolicy=self.op.ipolicy,
13567 hv_state_static=self.new_hv_state,
13568 disk_state_static=self.new_disk_state)
13570 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
13571 del self.remove_locks[locking.LEVEL_NODEGROUP]
13574 class LUGroupAssignNodes(NoHooksLU):
13575 """Logical unit for assigning nodes to groups.
13580 def ExpandNames(self):
13581 # These raise errors.OpPrereqError on their own:
13582 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13583 self.op.nodes = _GetWantedNodes(self, self.op.nodes)
13585 # We want to lock all the affected nodes and groups. We have readily
13586 # available the list of nodes, and the *destination* group. To gather the
13587 # list of "source" groups, we need to fetch node information later on.
13588 self.needed_locks = {
13589 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
13590 locking.LEVEL_NODE: self.op.nodes,
13593 def DeclareLocks(self, level):
13594 if level == locking.LEVEL_NODEGROUP:
13595 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
13597 # Try to get all affected nodes' groups without having the group or node
13598 # lock yet. Needs verification later in the code flow.
13599 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
13601 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
13603 def CheckPrereq(self):
13604 """Check prerequisites.
13607 assert self.needed_locks[locking.LEVEL_NODEGROUP]
13608 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
13609 frozenset(self.op.nodes))
13611 expected_locks = (set([self.group_uuid]) |
13612 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
13613 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
13614 if actual_locks != expected_locks:
13615 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
13616 " current groups are '%s', used to be '%s'" %
13617 (utils.CommaJoin(expected_locks),
13618 utils.CommaJoin(actual_locks)))
13620 self.node_data = self.cfg.GetAllNodesInfo()
13621 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13622 instance_data = self.cfg.GetAllInstancesInfo()
13624 if self.group is None:
13625 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13626 (self.op.group_name, self.group_uuid))
13628 (new_splits, previous_splits) = \
13629 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
13630 for node in self.op.nodes],
13631 self.node_data, instance_data)
13633 if new_splits:
13634 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
13636 if not self.op.force:
13637 raise errors.OpExecError("The following instances get split by this"
13638 " change and --force was not given: %s" %
13639 fmt_new_splits)
13640 else:
13641 self.LogWarning("This operation will split the following instances: %s",
13642 fmt_new_splits)
13644 if previous_splits:
13645 self.LogWarning("In addition, these already-split instances continue"
13646 " to be split across groups: %s",
13647 utils.CommaJoin(utils.NiceSort(previous_splits)))
13649 def Exec(self, feedback_fn):
13650 """Assign nodes to a new group.
13653 mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
13655 self.cfg.AssignGroupNodes(mods)
13657 @staticmethod
13658 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
13659 """Check for split instances after a node assignment.
13661 This method considers a series of node assignments as an atomic operation,
13662 and returns information about split instances after applying the set of
13663 changes.
13665 In particular, it returns information about newly split instances, and
13666 instances that were already split, and remain so after the change.
13668 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
13669 considered.
13671 @type changes: list of (node_name, new_group_uuid) pairs.
13672 @param changes: list of node assignments to consider.
13673 @param node_data: a dict with data for all nodes
13674 @param instance_data: a dict with all instances to consider
13675 @rtype: a two-tuple
13676 @return: a list of instances that were previously okay and end up split as a
13677 consequence of this change, and a list of instances that were previously
13678 split and this change does not fix.
13680 """
13681 changed_nodes = dict((node, group) for node, group in changes
13682 if node_data[node].group != group)
13684 all_split_instances = set()
13685 previously_split_instances = set()
13687 def InstanceNodes(instance):
13688 return [instance.primary_node] + list(instance.secondary_nodes)
13690 for inst in instance_data.values():
13691 if inst.disk_template not in constants.DTS_INT_MIRROR:
13692 continue
13694 instance_nodes = InstanceNodes(inst)
13696 if len(set(node_data[node].group for node in instance_nodes)) > 1:
13697 previously_split_instances.add(inst.name)
13699 if len(set(changed_nodes.get(node, node_data[node].group)
13700 for node in instance_nodes)) > 1:
13701 all_split_instances.add(inst.name)
13703 return (list(all_split_instances - previously_split_instances),
13704 list(previously_split_instances & all_split_instances))
13707 class _GroupQuery(_QueryBase):
13708 FIELDS = query.GROUP_FIELDS
13710 def ExpandNames(self, lu):
13711 lu.needed_locks = {}
13713 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
13714 self._cluster = lu.cfg.GetClusterInfo()
13715 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
13717 if not self.names:
13718 self.wanted = [name_to_uuid[name]
13719 for name in utils.NiceSort(name_to_uuid.keys())]
13720 else:
13721 # Accept names to be either names or UUIDs.
13722 missing = []
13723 self.wanted = []
13724 all_uuid = frozenset(self._all_groups.keys())
13726 for name in self.names:
13727 if name in all_uuid:
13728 self.wanted.append(name)
13729 elif name in name_to_uuid:
13730 self.wanted.append(name_to_uuid[name])
13731 else:
13732 missing.append(name)
13734 if missing:
13735 raise errors.OpPrereqError("Some groups do not exist: %s" %
13736 utils.CommaJoin(missing),
13737 errors.ECODE_NOENT)
13739 def DeclareLocks(self, lu, level):
13740 pass
13742 def _GetQueryData(self, lu):
13743 """Computes the list of node groups and their attributes.
13746 do_nodes = query.GQ_NODE in self.requested_data
13747 do_instances = query.GQ_INST in self.requested_data
13749 group_to_nodes = None
13750 group_to_instances = None
13752 # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
13753 # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
13754 # latter GetAllInstancesInfo() is not enough, for we have to go through
13755 # instance->node. Hence, we will need to process nodes even if we only need
13756 # instance information.
13757 if do_nodes or do_instances:
13758 all_nodes = lu.cfg.GetAllNodesInfo()
13759 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
13760 node_to_group = {}
13762 for node in all_nodes.values():
13763 if node.group in group_to_nodes:
13764 group_to_nodes[node.group].append(node.name)
13765 node_to_group[node.name] = node.group
13767 if do_instances:
13768 all_instances = lu.cfg.GetAllInstancesInfo()
13769 group_to_instances = dict((uuid, []) for uuid in self.wanted)
13771 for instance in all_instances.values():
13772 node = instance.primary_node
13773 if node in node_to_group:
13774 group_to_instances[node_to_group[node]].append(instance.name)
13776 if not do_nodes:
13777 # Do not pass on node information if it was not requested.
13778 group_to_nodes = None
13780 return query.GroupQueryData(self._cluster,
13781 [self._all_groups[uuid]
13782 for uuid in self.wanted],
13783 group_to_nodes, group_to_instances)
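# Shape of the two optional mappings computed above (hypothetical UUIDs and
# names; each stays None when the corresponding fields were not requested):
#
#   group_to_nodes = {
#     "uuid-g1": ["node1", "node2"],
#     "uuid-g2": ["node3"],
#   }
#   group_to_instances = {
#     "uuid-g1": ["instance1"],   # keyed by each instance's primary node
#     "uuid-g2": [],
#   }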
13786 class LUGroupQuery(NoHooksLU):
13787 """Logical unit for querying node groups.
13792 def CheckArguments(self):
13793 self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
13794 self.op.output_fields, False)
13796 def ExpandNames(self):
13797 self.gq.ExpandNames(self)
13799 def DeclareLocks(self, level):
13800 self.gq.DeclareLocks(self, level)
13802 def Exec(self, feedback_fn):
13803 return self.gq.OldStyleQuery(self)
13806 class LUGroupSetParams(LogicalUnit):
13807 """Modifies the parameters of a node group.
13810 HPATH = "group-modify"
13811 HTYPE = constants.HTYPE_GROUP
13814 def CheckArguments(self):
13815 all_changes = [
13816 self.op.ndparams,
13817 self.op.diskparams,
13818 self.op.alloc_policy,
13819 self.op.hv_state,
13820 self.op.disk_state,
13821 self.op.ipolicy,
13822 ]
13824 if all_changes.count(None) == len(all_changes):
13825 raise errors.OpPrereqError("Please pass at least one modification",
13826 errors.ECODE_INVAL)
13828 def ExpandNames(self):
13829 # This raises errors.OpPrereqError on its own:
13830 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13832 self.needed_locks = {
13833 locking.LEVEL_INSTANCE: [],
13834 locking.LEVEL_NODEGROUP: [self.group_uuid],
13835 }
13837 self.share_locks[locking.LEVEL_INSTANCE] = 1
13839 def DeclareLocks(self, level):
13840 if level == locking.LEVEL_INSTANCE:
13841 assert not self.needed_locks[locking.LEVEL_INSTANCE]
13843 # Lock instances optimistically, needs verification once group lock has
13844 # been acquired
13845 self.needed_locks[locking.LEVEL_INSTANCE] = \
13846 self.cfg.GetNodeGroupInstances(self.group_uuid)
13848 def CheckPrereq(self):
13849 """Check prerequisites.
13852 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
13854 # Check if locked instances are still correct
13855 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
13857 self.group = self.cfg.GetNodeGroup(self.group_uuid)
13858 cluster = self.cfg.GetClusterInfo()
13860 if self.group is None:
13861 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
13862 (self.op.group_name, self.group_uuid))
13864 if self.op.ndparams:
13865 new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
13866 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
13867 self.new_ndparams = new_ndparams
13869 if self.op.diskparams:
13870 self.new_diskparams = dict()
13871 for templ in constants.DISK_TEMPLATES:
13872 if templ not in self.op.diskparams:
13873 self.op.diskparams[templ] = {}
13874 new_templ_params = _GetUpdatedParams(self.group.diskparams[templ],
13875 self.op.diskparams[templ])
13876 utils.ForceDictType(new_templ_params, constants.DISK_DT_TYPES)
13877 self.new_diskparams[templ] = new_templ_params
13879 if self.op.hv_state:
13880 self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
13881 self.group.hv_state_static)
13883 if self.op.disk_state:
13884 self.new_disk_state = \
13885 _MergeAndVerifyDiskState(self.op.disk_state,
13886 self.group.disk_state_static)
13888 if self.op.ipolicy:
13889 self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
13890 self.op.ipolicy,
13891 group_policy=True)
13893 new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
13894 inst_filter = lambda inst: inst.name in owned_instances
13895 instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
13896 violations = \
13897 _ComputeNewInstanceViolations(_CalculateGroupIPolicy(cluster,
13898 self.group),
13899 new_ipolicy, instances)
13901 if violations:
13902 self.LogWarning("After the ipolicy change the following instances"
13903 " violate them: %s",
13904 utils.CommaJoin(violations))
13906 def BuildHooksEnv(self):
13907 """Build hooks env.
13911 "GROUP_NAME": self.op.group_name,
13912 "NEW_ALLOC_POLICY": self.op.alloc_policy,
13915 def BuildHooksNodes(self):
13916 """Build hooks nodes.
13919 mn = self.cfg.GetMasterNode()
13920 return ([mn], [mn])
13922 def Exec(self, feedback_fn):
13923 """Modifies the node group.
13928 if self.op.ndparams:
13929 self.group.ndparams = self.new_ndparams
13930 result.append(("ndparams", str(self.group.ndparams)))
13932 if self.op.diskparams:
13933 self.group.diskparams = self.new_diskparams
13934 result.append(("diskparams", str(self.group.diskparams)))
13936 if self.op.alloc_policy:
13937 self.group.alloc_policy = self.op.alloc_policy
13939 if self.op.hv_state:
13940 self.group.hv_state_static = self.new_hv_state
13942 if self.op.disk_state:
13943 self.group.disk_state_static = self.new_disk_state
13945 if self.op.ipolicy:
13946 self.group.ipolicy = self.new_ipolicy
13948 self.cfg.Update(self.group, feedback_fn)
13950 return result
13952 class LUGroupRemove(LogicalUnit):
13953 HPATH = "group-remove"
13954 HTYPE = constants.HTYPE_GROUP
13957 def ExpandNames(self):
13958 # This raises errors.OpPrereqError on its own:
13959 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
13960 self.needed_locks = {
13961 locking.LEVEL_NODEGROUP: [self.group_uuid],
13962 }
13964 def CheckPrereq(self):
13965 """Check prerequisites.
13967 This checks that the given group name exists as a node group, that it is
13968 empty (i.e., contains no nodes), and that it is not the last group of the
13969 cluster.
13971 """
13972 # Verify that the group is empty.
13973 group_nodes = [node.name
13974 for node in self.cfg.GetAllNodesInfo().values()
13975 if node.group == self.group_uuid]
13977 if group_nodes:
13978 raise errors.OpPrereqError("Group '%s' not empty, has the following"
13979 " nodes: %s" %
13980 (self.op.group_name,
13981 utils.CommaJoin(utils.NiceSort(group_nodes))),
13982 errors.ECODE_STATE)
13984 # Verify the cluster would not be left group-less.
13985 if len(self.cfg.GetNodeGroupList()) == 1:
13986 raise errors.OpPrereqError("Group '%s' is the only group,"
13987 " cannot be removed" %
13988 self.op.group_name,
13989 errors.ECODE_STATE)
13991 def BuildHooksEnv(self):
13992 """Build hooks env.
13996 "GROUP_NAME": self.op.group_name,
13999 def BuildHooksNodes(self):
14000 """Build hooks nodes.
14003 mn = self.cfg.GetMasterNode()
14004 return ([mn], [mn])
14006 def Exec(self, feedback_fn):
14007 """Remove the node group.
14011 self.cfg.RemoveNodeGroup(self.group_uuid)
14012 except errors.ConfigurationError:
14013 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
14014 (self.op.group_name, self.group_uuid))
14016 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
14019 class LUGroupRename(LogicalUnit):
14020 HPATH = "group-rename"
14021 HTYPE = constants.HTYPE_GROUP
14024 def ExpandNames(self):
14025 # This raises errors.OpPrereqError on its own:
14026 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14028 self.needed_locks = {
14029 locking.LEVEL_NODEGROUP: [self.group_uuid],
14030 }
14032 def CheckPrereq(self):
14033 """Check prerequisites.
14035 Ensures requested new name is not yet used.
14037 """
14038 try:
14039 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
14040 except errors.OpPrereqError:
14041 pass
14042 else:
14043 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
14044 " node group (UUID: %s)" %
14045 (self.op.new_name, new_name_uuid),
14046 errors.ECODE_EXISTS)
14048 def BuildHooksEnv(self):
14049 """Build hooks env.
14053 "OLD_NAME": self.op.group_name,
14054 "NEW_NAME": self.op.new_name,
14057 def BuildHooksNodes(self):
14058 """Build hooks nodes.
14061 mn = self.cfg.GetMasterNode()
14063 all_nodes = self.cfg.GetAllNodesInfo()
14064 all_nodes.pop(mn, None)
14067 run_nodes.extend(node.name for node in all_nodes.values()
14068 if node.group == self.group_uuid)
14070 return (run_nodes, run_nodes)
14072 def Exec(self, feedback_fn):
14073 """Rename the node group.
14076 group = self.cfg.GetNodeGroup(self.group_uuid)
14079 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
14080 (self.op.group_name, self.group_uuid))
14082 group.name = self.op.new_name
14083 self.cfg.Update(group, feedback_fn)
14085 return self.op.new_name
14088 class LUGroupEvacuate(LogicalUnit):
14089 HPATH = "group-evacuate"
14090 HTYPE = constants.HTYPE_GROUP
14093 def ExpandNames(self):
14094 # This raises errors.OpPrereqError on its own:
14095 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
14097 if self.op.target_groups:
14098 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
14099 self.op.target_groups)
14101 self.req_target_uuids = []
14103 if self.group_uuid in self.req_target_uuids:
14104 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
14105 " as a target group (targets are %s)" %
14107 utils.CommaJoin(self.req_target_uuids)),
14108 errors.ECODE_INVAL)
14110 self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
14112 self.share_locks = _ShareAll()
14113 self.needed_locks = {
14114 locking.LEVEL_INSTANCE: [],
14115 locking.LEVEL_NODEGROUP: [],
14116 locking.LEVEL_NODE: [],
14117 }
14119 def DeclareLocks(self, level):
14120 if level == locking.LEVEL_INSTANCE:
14121 assert not self.needed_locks[locking.LEVEL_INSTANCE]
14123 # Lock instances optimistically, needs verification once node and group
14124 # locks have been acquired
14125 self.needed_locks[locking.LEVEL_INSTANCE] = \
14126 self.cfg.GetNodeGroupInstances(self.group_uuid)
14128 elif level == locking.LEVEL_NODEGROUP:
14129 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
14131 if self.req_target_uuids:
14132 lock_groups = set([self.group_uuid] + self.req_target_uuids)
14134 # Lock all groups used by instances optimistically; this requires going
14135 # via the node before it's locked, requiring verification later on
14136 lock_groups.update(group_uuid
14137 for instance_name in
14138 self.owned_locks(locking.LEVEL_INSTANCE)
14139 for group_uuid in
14140 self.cfg.GetInstanceNodeGroups(instance_name))
14141 else:
14142 # No target groups, need to lock all of them
14143 lock_groups = locking.ALL_SET
14145 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
14147 elif level == locking.LEVEL_NODE:
14148 # This will only lock the nodes in the group to be evacuated which
14149 # contain actual instances
14150 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
14151 self._LockInstancesNodes()
14153 # Lock all nodes in group to be evacuated and target groups
14154 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14155 assert self.group_uuid in owned_groups
14156 member_nodes = [node_name
14157 for group in owned_groups
14158 for node_name in self.cfg.GetNodeGroup(group).members]
14159 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
14161 def CheckPrereq(self):
14162 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
14163 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
14164 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
14166 assert owned_groups.issuperset(self.req_target_uuids)
14167 assert self.group_uuid in owned_groups
14169 # Check if locked instances are still correct
14170 _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
14172 # Get instance information
14173 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
14175 # Check if node groups for locked instances are still correct
14176 _CheckInstancesNodeGroups(self.cfg, self.instances,
14177 owned_groups, owned_nodes, self.group_uuid)
14179 if self.req_target_uuids:
14180 # User requested specific target groups
14181 self.target_uuids = self.req_target_uuids
14183 # All groups except the one to be evacuated are potential targets
14184 self.target_uuids = [group_uuid for group_uuid in owned_groups
14185 if group_uuid != self.group_uuid]
14187 if not self.target_uuids:
14188 raise errors.OpPrereqError("There are no possible target groups",
14189 errors.ECODE_INVAL)
14191 def BuildHooksEnv(self):
14192 """Build hooks env.
14196 "GROUP_NAME": self.op.group_name,
14197 "TARGET_GROUPS": " ".join(self.target_uuids),
14200 def BuildHooksNodes(self):
14201 """Build hooks nodes.
14204 mn = self.cfg.GetMasterNode()
14206 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
14208 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
14210 return (run_nodes, run_nodes)
14212 def Exec(self, feedback_fn):
14213 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
14215 assert self.group_uuid not in self.target_uuids
14217 ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
14218 instances=instances, target_groups=self.target_uuids)
14220 ial.Run(self.op.iallocator)
14222 if not ial.success:
14223 raise errors.OpPrereqError("Can't compute group evacuation using"
14224 " iallocator '%s': %s" %
14225 (self.op.iallocator, ial.info),
14226 errors.ECODE_NORES)
14228 jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
14230 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
14231 len(jobs), self.op.group_name)
14233 return ResultWithJobs(jobs)
14236 class TagsLU(NoHooksLU): # pylint: disable=W0223
14237 """Generic tags LU.
14239 This is an abstract class which is the parent of all the other tags LUs.
14242 def ExpandNames(self):
14243 self.group_uuid = None
14244 self.needed_locks = {}
14246 if self.op.kind == constants.TAG_NODE:
14247 self.op.name = _ExpandNodeName(self.cfg, self.op.name)
14248 lock_level = locking.LEVEL_NODE
14249 lock_name = self.op.name
14250 elif self.op.kind == constants.TAG_INSTANCE:
14251 self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
14252 lock_level = locking.LEVEL_INSTANCE
14253 lock_name = self.op.name
14254 elif self.op.kind == constants.TAG_NODEGROUP:
14255 self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
14256 lock_level = locking.LEVEL_NODEGROUP
14257 lock_name = self.group_uuid
14258 else:
14259 lock_level = None
14260 lock_name = None
14262 if lock_level and getattr(self.op, "use_locking", True):
14263 self.needed_locks[lock_level] = lock_name
14265 # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
14266 # not possible to acquire the BGL based on opcode parameters)
14268 def CheckPrereq(self):
14269 """Check prerequisites.
14272 if self.op.kind == constants.TAG_CLUSTER:
14273 self.target = self.cfg.GetClusterInfo()
14274 elif self.op.kind == constants.TAG_NODE:
14275 self.target = self.cfg.GetNodeInfo(self.op.name)
14276 elif self.op.kind == constants.TAG_INSTANCE:
14277 self.target = self.cfg.GetInstanceInfo(self.op.name)
14278 elif self.op.kind == constants.TAG_NODEGROUP:
14279 self.target = self.cfg.GetNodeGroup(self.group_uuid)
14281 raise errors.OpPrereqError("Wrong tag type requested (%s)" %
14282 str(self.op.kind), errors.ECODE_INVAL)
14285 class LUTagsGet(TagsLU):
14286 """Returns the tags of a given object.
14291 def ExpandNames(self):
14292 TagsLU.ExpandNames(self)
14294 # Share locks as this is only a read operation
14295 self.share_locks = _ShareAll()
14297 def Exec(self, feedback_fn):
14298 """Returns the tag list.
14301 return list(self.target.GetTags())
14304 class LUTagsSearch(NoHooksLU):
14305 """Searches the tags for a given pattern.
14310 def ExpandNames(self):
14311 self.needed_locks = {}
14313 def CheckPrereq(self):
14314 """Check prerequisites.
14316 This checks the pattern passed for validity by compiling it.
14320 self.re = re.compile(self.op.pattern)
14321 except re.error, err:
14322 raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
14323 (self.op.pattern, err), errors.ECODE_INVAL)
14325 def Exec(self, feedback_fn):
14326 """Returns the tag list.
14330 tgts = [("/cluster", cfg.GetClusterInfo())]
14331 ilist = cfg.GetAllInstancesInfo().values()
14332 tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
14333 nlist = cfg.GetAllNodesInfo().values()
14334 tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
14335 tgts.extend(("/nodegroup/%s" % n.name, n)
14336 for n in cfg.GetAllNodeGroupsInfo().values())
14337 results = []
14338 for path, target in tgts:
14339 for tag in target.GetTags():
14340 if self.re.search(tag):
14341 results.append((path, tag))
14343 return results
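# Example of the (path, tag) pairs returned above, assuming a hypothetical
# search pattern of "^prod" and a cluster where one instance and one node
# carry matching tags:
#
#   [
#     ("/instances/instance1.example.com", "production"),
#     ("/nodes/node1.example.com", "prod-rack1"),
#   ]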
14345 class LUTagsSet(TagsLU):
14346 """Sets a tag on a given object.
14351 def CheckPrereq(self):
14352 """Check prerequisites.
14354 This checks the type and length of the tag name and value.
14357 TagsLU.CheckPrereq(self)
14358 for tag in self.op.tags:
14359 objects.TaggableObject.ValidateTag(tag)
14361 def Exec(self, feedback_fn):
14362 """Sets the tag.
14364 """
14365 try:
14366 for tag in self.op.tags:
14367 self.target.AddTag(tag)
14368 except errors.TagError, err:
14369 raise errors.OpExecError("Error while setting tag: %s" % str(err))
14370 self.cfg.Update(self.target, feedback_fn)
14373 class LUTagsDel(TagsLU):
14374 """Delete a list of tags from a given object.
14379 def CheckPrereq(self):
14380 """Check prerequisites.
14382 This checks that we have the given tag.
14385 TagsLU.CheckPrereq(self)
14386 for tag in self.op.tags:
14387 objects.TaggableObject.ValidateTag(tag)
14388 del_tags = frozenset(self.op.tags)
14389 cur_tags = self.target.GetTags()
14391 diff_tags = del_tags - cur_tags
14392 if diff_tags:
14393 diff_names = ("'%s'" % i for i in sorted(diff_tags))
14394 raise errors.OpPrereqError("Tag(s) %s not found" %
14395 (utils.CommaJoin(diff_names), ),
14396 errors.ECODE_NOENT)
14398 def Exec(self, feedback_fn):
14399 """Remove the tag from the object.
14402 for tag in self.op.tags:
14403 self.target.RemoveTag(tag)
14404 self.cfg.Update(self.target, feedback_fn)
14407 class LUTestDelay(NoHooksLU):
14408 """Sleep for a specified amount of time.
14410 This LU sleeps on the master and/or nodes for a specified amount of
14416 def ExpandNames(self):
14417 """Expand names and set required locks.
14419 This expands the node list, if any.
14422 self.needed_locks = {}
14423 if self.op.on_nodes:
14424 # _GetWantedNodes can be used here, but is not always appropriate to use
14425 # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
14426 # more information.
14427 self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
14428 self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
14430 def _TestDelay(self):
14431 """Do the actual sleep.
14434 if self.op.on_master:
14435 if not utils.TestDelay(self.op.duration):
14436 raise errors.OpExecError("Error during master delay test")
14437 if self.op.on_nodes:
14438 result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
14439 for node, node_result in result.items():
14440 node_result.Raise("Failure during rpc call to node %s" % node)
14442 def Exec(self, feedback_fn):
14443 """Execute the test delay opcode, with the wanted repetitions.
14446 if self.op.repeat == 0:
14449 top_value = self.op.repeat - 1
14450 for i in range(self.op.repeat):
14451 self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
14455 class LUTestJqueue(NoHooksLU):
14456 """Utility LU to test some aspects of the job queue.
14461 # Must be lower than default timeout for WaitForJobChange to see whether it
14462 # notices changed jobs
14463 _CLIENT_CONNECT_TIMEOUT = 20.0
14464 _CLIENT_CONFIRM_TIMEOUT = 60.0
14466 @classmethod
14467 def _NotifyUsingSocket(cls, cb, errcls):
14468 """Opens a Unix socket and waits for another program to connect.
14471 @param cb: Callback to send socket name to client
14472 @type errcls: class
14473 @param errcls: Exception class to use for errors
14476 # Using a temporary directory as there's no easy way to create temporary
14477 # sockets without writing a custom loop around tempfile.mktemp and
14478 # socket.bind
14479 tmpdir = tempfile.mkdtemp()
14481 tmpsock = utils.PathJoin(tmpdir, "sock")
14483 logging.debug("Creating temporary socket at %s", tmpsock)
14484 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
14486 sock.bind(tmpsock)
14487 sock.listen(1)
14489 # Send details to client
14490 cb(tmpsock)
14492 # Wait for client to connect before continuing
14493 sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
14494 try:
14495 (conn, _) = sock.accept()
14496 except socket.error, err:
14497 raise errcls("Client didn't connect in time (%s)" % err)
14501 # Remove as soon as client is connected
14502 shutil.rmtree(tmpdir)
14504 # Wait for client to close
14507 # pylint: disable=E1101
14508 # Instance of '_socketobject' has no ... member
14509 conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
14511 except socket.error, err:
14512 raise errcls("Client failed to confirm notification (%s)" % err)
14516 def _SendNotification(self, test, arg, sockname):
14517 """Sends a notification to the client.
14520 @param test: Test name
14521 @param arg: Test argument (depends on test)
14522 @type sockname: string
14523 @param sockname: Socket path
14526 self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
14528 def _Notify(self, prereq, test, arg):
14529 """Notifies the client of a test.
14532 @param prereq: Whether this is a prereq-phase test
14534 @param test: Test name
14535 @param arg: Test argument (depends on test)
14537 """
14538 if prereq:
14539 errcls = errors.OpPrereqError
14540 else:
14541 errcls = errors.OpExecError
14543 return self._NotifyUsingSocket(compat.partial(self._SendNotification,
14544 test, arg),
14545 errcls)
14547 def CheckArguments(self):
14548 self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
14549 self.expandnames_calls = 0
14551 def ExpandNames(self):
14552 checkargs_calls = getattr(self, "checkargs_calls", 0)
14553 if checkargs_calls < 1:
14554 raise errors.ProgrammerError("CheckArguments was not called")
14556 self.expandnames_calls += 1
14558 if self.op.notify_waitlock:
14559 self._Notify(True, constants.JQT_EXPANDNAMES, None)
14561 self.LogInfo("Expanding names")
14563 # Get lock on master node (just to get a lock, not for a particular reason)
14564 self.needed_locks = {
14565 locking.LEVEL_NODE: self.cfg.GetMasterNode(),
14566 }
14568 def Exec(self, feedback_fn):
14569 if self.expandnames_calls < 1:
14570 raise errors.ProgrammerError("ExpandNames was not called")
14572 if self.op.notify_exec:
14573 self._Notify(False, constants.JQT_EXEC, None)
14575 self.LogInfo("Executing")
14577 if self.op.log_messages:
14578 self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
14579 for idx, msg in enumerate(self.op.log_messages):
14580 self.LogInfo("Sending log message %s", idx + 1)
14581 feedback_fn(constants.JQT_MSGPREFIX + msg)
14582 # Report how many test messages have been sent
14583 self._Notify(False, constants.JQT_LOGMSG, idx + 1)
14586 raise errors.OpExecError("Opcode failure was requested")
14591 class IAllocator(object):
14592 """IAllocator framework.
14594 An IAllocator instance has four sets of attributes:
14595 - cfg that is needed to query the cluster
14596 - input data (all members of the _KEYS class attribute are required)
14597 - four buffer attributes (in|out_data|text), that represent the
14598 input (to the external script) in text and data structure format,
14599 and the output from it, again in two formats
14600 - the result variables from the script (success, info, nodes) for
14601 easy usage
14603 """
14604 # pylint: disable=R0902
14605 # lots of instance attributes
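# Typical usage, following the pattern of the LUs earlier in this file
# (a sketch with hypothetical variable names, not an additional API):
#
#   ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
#                    instances=[instance_name], target_groups=target_uuids)
#   ial.Run(self.op.iallocator)
#   if not ial.success:
#     raise errors.OpPrereqError("iallocator failed: %s" % ial.info)
#   jobs = _LoadNodeEvacResult(self, ial.result, early_release, False)
#
# The keyword arguments accepted by __init__ depend on the mode, as declared
# in the _MODE_DATA table consulted below.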
14607 def __init__(self, cfg, rpc_runner, mode, **kwargs):
14608 self.cfg = cfg
14609 self.rpc = rpc_runner
14610 # init buffer variables
14611 self.in_text = self.out_text = self.in_data = self.out_data = None
14612 # init all input fields so that pylint is happy
14613 self.mode = mode
14614 self.memory = self.disks = self.disk_template = self.spindle_use = None
14615 self.os = self.tags = self.nics = self.vcpus = None
14616 self.hypervisor = None
14617 self.relocate_from = None
14618 self.name = None
14619 self.instances = None
14620 self.evac_mode = None
14621 self.target_groups = []
14623 self.required_nodes = None
14624 # init result fields
14625 self.success = self.info = self.result = None
14627 try:
14628 (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
14629 except KeyError:
14630 raise errors.ProgrammerError("Unknown mode '%s' passed to the"
14631 " IAllocator" % self.mode)
14633 keyset = [n for (n, _) in keydata]
14635 for key in kwargs:
14636 if key not in keyset:
14637 raise errors.ProgrammerError("Invalid input parameter '%s' to"
14638 " IAllocator" % key)
14639 setattr(self, key, kwargs[key])
14641 for key in keyset:
14642 if key not in kwargs:
14643 raise errors.ProgrammerError("Missing input parameter '%s' to"
14644 " IAllocator" % key)
14645 self._BuildInputData(compat.partial(fn, self), keydata)
14647 def _ComputeClusterData(self):
14648 """Compute the generic allocator input data.
14650 This is the data that is independent of the actual operation.
14652 """
14653 cfg = self.cfg
14654 cluster_info = cfg.GetClusterInfo()
14656 data = {
14657 "version": constants.IALLOCATOR_VERSION,
14658 "cluster_name": cfg.GetClusterName(),
14659 "cluster_tags": list(cluster_info.GetTags()),
14660 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
14661 "ipolicy": cluster_info.ipolicy,
14662 }
14663 ninfo = cfg.GetAllNodesInfo()
14664 iinfo = cfg.GetAllInstancesInfo().values()
14665 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
14668 node_list = [n.name for n in ninfo.values() if n.vm_capable]
14670 if self.mode == constants.IALLOCATOR_MODE_ALLOC:
14671 hypervisor_name = self.hypervisor
14672 elif self.mode == constants.IALLOCATOR_MODE_RELOC:
14673 hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
14675 hypervisor_name = cluster_info.primary_hypervisor
14677 node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
14680 self.rpc.call_all_instances_info(node_list,
14681 cluster_info.enabled_hypervisors)
14683 data["nodegroups"] = self._ComputeNodeGroupData(cfg)
14685 config_ndata = self._ComputeBasicNodeData(cfg, ninfo)
14686 data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
14687 i_list, config_ndata)
14688 assert len(data["nodes"]) == len(ninfo), \
14689 "Incomplete node data computed"
14691 data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
14693 self.in_data = data
14696 def _ComputeNodeGroupData(cfg):
14697 """Compute node groups data.
14700 cluster = cfg.GetClusterInfo()
14701 ng = dict((guuid, {
14702 "name": gdata.name,
14703 "alloc_policy": gdata.alloc_policy,
14704 "ipolicy": _CalculateGroupIPolicy(cluster, gdata),
14706 for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
14711 def _ComputeBasicNodeData(cfg, node_cfg):
14712     """Compute the static (configuration-derived) node data.
14715     @returns: a dict mapping node names to dicts of config-based node attributes
14718 # fill in static (config-based) values
14719 node_results = dict((ninfo.name, {
14720 "tags": list(ninfo.GetTags()),
14721 "primary_ip": ninfo.primary_ip,
14722 "secondary_ip": ninfo.secondary_ip,
14723 "offline": ninfo.offline,
14724 "drained": ninfo.drained,
14725 "master_candidate": ninfo.master_candidate,
14726 "group": ninfo.group,
14727 "master_capable": ninfo.master_capable,
14728 "vm_capable": ninfo.vm_capable,
14729 "ndparams": cfg.GetNdParams(ninfo),
14731 for ninfo in node_cfg.values())
14733 return node_results
14736 def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
14738     """Extend the static node data with dynamic (runtime) information.
14740 @param node_results: the basic node structures as filled from the config
14743 #TODO(dynmem): compute the right data on MAX and MIN memory
14744 # make a copy of the current dict
14745 node_results = dict(node_results)
14746 for nname, nresult in node_data.items():
14747 assert nname in node_results, "Missing basic data for node %s" % nname
14748 ninfo = node_cfg[nname]
14750 if not (ninfo.offline or ninfo.drained):
14751 nresult.Raise("Can't get data for node %s" % nname)
14752 node_iinfo[nname].Raise("Can't get node instance info from node %s" %
14754 remote_info = _MakeLegacyNodeInfo(nresult.payload)
14756 for attr in ["memory_total", "memory_free", "memory_dom0",
14757 "vg_size", "vg_free", "cpu_total"]:
14758 if attr not in remote_info:
14759 raise errors.OpExecError("Node '%s' didn't return attribute"
14760 " '%s'" % (nname, attr))
14761 if not isinstance(remote_info[attr], int):
14762             raise errors.OpExecError("Node '%s' returned invalid value"
14763                                      " for '%s': %s" %
14764                                      (nname, attr, remote_info[attr]))
14765 # compute memory used by primary instances
14766 i_p_mem = i_p_up_mem = 0
14767 for iinfo, beinfo in i_list:
14768 if iinfo.primary_node == nname:
14769 i_p_mem += beinfo[constants.BE_MAXMEM]
14770 if iinfo.name not in node_iinfo[nname].payload:
14773 i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
14774 i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
14775 remote_info["memory_free"] -= max(0, i_mem_diff)
14777 if iinfo.admin_state == constants.ADMINST_UP:
14778 i_p_up_mem += beinfo[constants.BE_MAXMEM]
14780 # compute memory used by instances
14782 "total_memory": remote_info["memory_total"],
14783 "reserved_memory": remote_info["memory_dom0"],
14784 "free_memory": remote_info["memory_free"],
14785 "total_disk": remote_info["vg_size"],
14786 "free_disk": remote_info["vg_free"],
14787 "total_cpus": remote_info["cpu_total"],
14788 "i_pri_memory": i_p_mem,
14789 "i_pri_up_memory": i_p_up_mem,
14791 pnr_dyn.update(node_results[nname])
14792 node_results[nname] = pnr_dyn
14794 return node_results
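  # For illustration, one merged node entry (static fields from
  # _ComputeBasicNodeData plus the dynamic fields added above); all values
  # are made up:
  #
  #   "node1.example.com": {
  #     "tags": [], "primary_ip": "192.0.2.1", "secondary_ip": "198.51.100.1",
  #     "offline": False, "drained": False, "master_candidate": True,
  #     "group": "<group uuid>", "master_capable": True, "vm_capable": True,
  #     "ndparams": {...},
  #     "total_memory": 32768, "reserved_memory": 1024, "free_memory": 16384,
  #     "total_disk": 512000, "free_disk": 256000, "total_cpus": 8,
  #     "i_pri_memory": 8192, "i_pri_up_memory": 4096,
  #   }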
14797 def _ComputeInstanceData(cluster_info, i_list):
14798 """Compute global instance data.
14802 for iinfo, beinfo in i_list:
14804 for nic in iinfo.nics:
14805 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
14809 "mode": filled_params[constants.NIC_MODE],
14810 "link": filled_params[constants.NIC_LINK],
14812 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
14813 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
14814 nic_data.append(nic_dict)
14816 "tags": list(iinfo.GetTags()),
14817 "admin_state": iinfo.admin_state,
14818 "vcpus": beinfo[constants.BE_VCPUS],
14819 "memory": beinfo[constants.BE_MAXMEM],
14820 "spindle_use": beinfo[constants.BE_SPINDLE_USE],
14822 "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
14824 "disks": [{constants.IDISK_SIZE: dsk.size,
14825 constants.IDISK_MODE: dsk.mode}
14826 for dsk in iinfo.disks],
14827 "disk_template": iinfo.disk_template,
14828 "hypervisor": iinfo.hypervisor,
14830 pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
14832 instance_data[iinfo.name] = pir
14834 return instance_data
14836 def _AddNewInstance(self):
14837 """Add new instance data to allocator structure.
14839     This in combination with _ComputeClusterData will create the
14840     correct structure needed as input for the allocator.
14842     The checks for the completeness of the opcode must have already been done.
14846 disk_space = _ComputeDiskSize(self.disk_template, self.disks)
14848 if self.disk_template in constants.DTS_INT_MIRROR:
14849 self.required_nodes = 2
14851 self.required_nodes = 1
14855 "disk_template": self.disk_template,
14858 "vcpus": self.vcpus,
14859 "memory": self.memory,
14860 "spindle_use": self.spindle_use,
14861 "disks": self.disks,
14862 "disk_space_total": disk_space,
14864 "required_nodes": self.required_nodes,
14865 "hypervisor": self.hypervisor,
14870 def _AddRelocateInstance(self):
14871 """Add relocate instance data to allocator structure.
14873     This in combination with _ComputeClusterData will create the
14874     correct structure needed as input for the allocator.
14876     The checks for the completeness of the opcode must have already been done.
14880 instance = self.cfg.GetInstanceInfo(self.name)
14881 if instance is None:
14882 raise errors.ProgrammerError("Unknown instance '%s' passed to"
14883 " IAllocator" % self.name)
14885 if instance.disk_template not in constants.DTS_MIRRORED:
14886 raise errors.OpPrereqError("Can't relocate non-mirrored instances",
14887 errors.ECODE_INVAL)
14889 if instance.disk_template in constants.DTS_INT_MIRROR and \
14890 len(instance.secondary_nodes) != 1:
14891       raise errors.OpPrereqError("Instance does not have exactly one"
14892                                  " secondary node", errors.ECODE_STATE)
14894 self.required_nodes = 1
14895 disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
14896 disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
14900 "disk_space_total": disk_space,
14901 "required_nodes": self.required_nodes,
14902 "relocate_from": self.relocate_from,
14906 def _AddNodeEvacuate(self):
14907 """Get data for node-evacuate requests.
14911 "instances": self.instances,
14912 "evac_mode": self.evac_mode,
14915 def _AddChangeGroup(self):
14916     """Get data for change-group requests.
14920 "instances": self.instances,
14921 "target_groups": self.target_groups,
14924 def _BuildInputData(self, fn, keydata):
14925 """Build input data structures.
14928 self._ComputeClusterData()
14931 request["type"] = self.mode
14932 for keyname, keytype in keydata:
14933 if keyname not in request:
14934 raise errors.ProgrammerError("Request parameter %s is missing" %
14936 val = request[keyname]
14937 if not keytype(val):
14938 raise errors.ProgrammerError("Request parameter %s doesn't pass"
14939 " validation, value %s, expected"
14940 " type %s" % (keyname, val, keytype))
14941 self.in_data["request"] = request
14943 self.in_text = serializer.Dump(self.in_data)
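  # A minimal sketch of the resulting self.in_data["request"] for a
  # relocation, matching the keys built in _AddRelocateInstance and the
  # keydata declared in _MODE_DATA below; values are illustrative only:
  #
  #   {
  #     "type": "relocate",
  #     "name": "instance1.example.com",
  #     "disk_space_total": 10368,
  #     "required_nodes": 1,
  #     "relocate_from": ["node2.example.com"],
  #   }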
14945 _STRING_LIST = ht.TListOf(ht.TString)
14946 _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
14947 # pylint: disable=E1101
14948 # Class '...' has no 'OP_ID' member
14949 "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
14950 opcodes.OpInstanceMigrate.OP_ID,
14951 opcodes.OpInstanceReplaceDisks.OP_ID])
14955 ht.TListOf(ht.TAnd(ht.TIsLength(3),
14956 ht.TItems([ht.TNonEmptyString,
14957 ht.TNonEmptyString,
14958 ht.TListOf(ht.TNonEmptyString),
14961 ht.TListOf(ht.TAnd(ht.TIsLength(2),
14962 ht.TItems([ht.TNonEmptyString,
14965 _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
14966 ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
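  # Illustrative example of a value accepted by _NEVAC_RESULT (the reply to a
  # node-evacuate request): a triple of moved instances (_NEVAC_MOVED), failed
  # instances (_NEVAC_FAILED) and jobs to submit (_JOB_LIST); all names and
  # reasons are made up:
  #
  #   [
  #     [["instance1.example.com", "group-a", ["node3.example.com"]]],
  #     [["instance2.example.com", "not enough memory on target nodes"]],
  #     [[{"OP_ID": "OP_INSTANCE_MIGRATE", ...}]],
  #   ]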
14969 constants.IALLOCATOR_MODE_ALLOC:
14972 ("name", ht.TString),
14973 ("memory", ht.TInt),
14974 ("spindle_use", ht.TInt),
14975 ("disks", ht.TListOf(ht.TDict)),
14976 ("disk_template", ht.TString),
14977 ("os", ht.TString),
14978 ("tags", _STRING_LIST),
14979 ("nics", ht.TListOf(ht.TDict)),
14980 ("vcpus", ht.TInt),
14981 ("hypervisor", ht.TString),
14983 constants.IALLOCATOR_MODE_RELOC:
14984 (_AddRelocateInstance,
14985 [("name", ht.TString), ("relocate_from", _STRING_LIST)],
14987 constants.IALLOCATOR_MODE_NODE_EVAC:
14988 (_AddNodeEvacuate, [
14989 ("instances", _STRING_LIST),
14990 ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
14992 constants.IALLOCATOR_MODE_CHG_GROUP:
14993 (_AddChangeGroup, [
14994 ("instances", _STRING_LIST),
14995 ("target_groups", _STRING_LIST),
14999 def Run(self, name, validate=True, call_fn=None):
15000 """Run an instance allocator and return the results.
15003 if call_fn is None:
15004 call_fn = self.rpc.call_iallocator_runner
15006 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
15007 result.Raise("Failure while running the iallocator script")
15009 self.out_text = result.payload
15011 self._ValidateResult()
15013 def _ValidateResult(self):
15014 """Process the allocator results.
15016     This will process the allocator output and, if successful, save the
15017     result in self.out_data and in the individual result attributes.
15021 rdict = serializer.Load(self.out_text)
15022 except Exception, err:
15023 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
15025 if not isinstance(rdict, dict):
15026 raise errors.OpExecError("Can't parse iallocator results: not a dict")
15028     # TODO: remove backwards compatibility in later versions
15029 if "nodes" in rdict and "result" not in rdict:
15030 rdict["result"] = rdict["nodes"]
15033 for key in "success", "info", "result":
15034 if key not in rdict:
15035 raise errors.OpExecError("Can't parse iallocator results:"
15036 " missing key '%s'" % key)
15037 setattr(self, key, rdict[key])
15039 if not self._result_check(self.result):
15040 raise errors.OpExecError("Iallocator returned invalid result,"
15041 " expected %s, got %s" %
15042 (self._result_check, self.result),
15043 errors.ECODE_INVAL)
15045 if self.mode == constants.IALLOCATOR_MODE_RELOC:
15046 assert self.relocate_from is not None
15047 assert self.required_nodes == 1
15049 node2group = dict((name, ndata["group"])
15050 for (name, ndata) in self.in_data["nodes"].items())
15052 fn = compat.partial(self._NodesToGroups, node2group,
15053 self.in_data["nodegroups"])
15055 instance = self.cfg.GetInstanceInfo(self.name)
15056 request_groups = fn(self.relocate_from + [instance.primary_node])
15057 result_groups = fn(rdict["result"] + [instance.primary_node])
15059 if self.success and not set(result_groups).issubset(request_groups):
15060 raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
15061 " differ from original groups (%s)" %
15062 (utils.CommaJoin(result_groups),
15063 utils.CommaJoin(request_groups)))
15065 elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
15066 assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
15068 self.out_data = rdict
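  # A minimal well-formed allocator reply as expected here; the exact shape of
  # "result" depends on the request mode (this example would fit an allocation
  # request returning one node), values illustrative:
  #
  #   {
  #     "success": True,
  #     "info": "allocation successful",
  #     "result": ["node1.example.com"],
  #   }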
15071 def _NodesToGroups(node2group, groups, nodes):
15072 """Returns a list of unique group names for a list of nodes.
15074 @type node2group: dict
15075 @param node2group: Map from node name to group UUID
15077 @param groups: Group information
15079 @param nodes: Node names
15086 group_uuid = node2group[node]
15088 # Ignore unknown node
15092 group = groups[group_uuid]
15094 # Can't find group, let's use UUID
15095 group_name = group_uuid
15097 group_name = group["name"]
15099 result.add(group_name)
15101 return sorted(result)
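  # Worked example with hypothetical data: nodes missing from node2group are
  # ignored, unknown group UUIDs fall back to the UUID itself, and the result
  # is a sorted list of unique names:
  #
  #   node2group = {"node1": "uuid-a", "node2": "uuid-a", "node3": "uuid-b"}
  #   groups = {"uuid-a": {"name": "default"}}
  #   _NodesToGroups(node2group, groups, ["node1", "node2", "node3", "nodeX"])
  #   => ["default", "uuid-b"]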
15104 class LUTestAllocator(NoHooksLU):
15105 """Run allocator tests.
15107   This LU runs the allocator tests.
15110 def CheckPrereq(self):
15111 """Check prerequisites.
15113     This checks the opcode parameters depending on the direction and mode of the test.
15116 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
15117 for attr in ["memory", "disks", "disk_template",
15118 "os", "tags", "nics", "vcpus"]:
15119 if not hasattr(self.op, attr):
15120 raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
15121 attr, errors.ECODE_INVAL)
15122 iname = self.cfg.ExpandInstanceName(self.op.name)
15123 if iname is not None:
15124 raise errors.OpPrereqError("Instance '%s' already in the cluster" %
15125 iname, errors.ECODE_EXISTS)
15126 if not isinstance(self.op.nics, list):
15127 raise errors.OpPrereqError("Invalid parameter 'nics'",
15128 errors.ECODE_INVAL)
15129 if not isinstance(self.op.disks, list):
15130 raise errors.OpPrereqError("Invalid parameter 'disks'",
15131 errors.ECODE_INVAL)
15132 for row in self.op.disks:
15133 if (not isinstance(row, dict) or
15134 constants.IDISK_SIZE not in row or
15135 not isinstance(row[constants.IDISK_SIZE], int) or
15136 constants.IDISK_MODE not in row or
15137 row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
15138 raise errors.OpPrereqError("Invalid contents of the 'disks'"
15139 " parameter", errors.ECODE_INVAL)
15140 if self.op.hypervisor is None:
15141 self.op.hypervisor = self.cfg.GetHypervisorType()
15142 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15143 fname = _ExpandInstanceName(self.cfg, self.op.name)
15144 self.op.name = fname
15145 self.relocate_from = \
15146 list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
15147 elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
15148 constants.IALLOCATOR_MODE_NODE_EVAC):
15149 if not self.op.instances:
15150 raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
15151 self.op.instances = _GetWantedInstances(self, self.op.instances)
15153 raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
15154 self.op.mode, errors.ECODE_INVAL)
15156 if self.op.direction == constants.IALLOCATOR_DIR_OUT:
15157 if self.op.allocator is None:
15158 raise errors.OpPrereqError("Missing allocator name",
15159 errors.ECODE_INVAL)
15160 elif self.op.direction != constants.IALLOCATOR_DIR_IN:
15161 raise errors.OpPrereqError("Wrong allocator test '%s'" %
15162 self.op.direction, errors.ECODE_INVAL)
15164 def Exec(self, feedback_fn):
15165 """Run the allocator test.
15168 if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
15169 ial = IAllocator(self.cfg, self.rpc,
15172 memory=self.op.memory,
15173 disks=self.op.disks,
15174 disk_template=self.op.disk_template,
15178 vcpus=self.op.vcpus,
15179 hypervisor=self.op.hypervisor,
15181 elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
15182 ial = IAllocator(self.cfg, self.rpc,
15185 relocate_from=list(self.relocate_from),
15187 elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
15188 ial = IAllocator(self.cfg, self.rpc,
15190 instances=self.op.instances,
15191 target_groups=self.op.target_groups)
15192 elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
15193 ial = IAllocator(self.cfg, self.rpc,
15195 instances=self.op.instances,
15196 evac_mode=self.op.evac_mode)
15198       raise errors.ProgrammerError("Unhandled mode %s in"
15199                                    " LUTestAllocator.Exec" % self.op.mode)
15201 if self.op.direction == constants.IALLOCATOR_DIR_IN:
15202 result = ial.in_text
15204 ial.Run(self.op.allocator, validate=False)
15205 result = ial.out_text
15209 #: Query type implementations
15211 constants.QR_CLUSTER: _ClusterQuery,
15212 constants.QR_INSTANCE: _InstanceQuery,
15213 constants.QR_NODE: _NodeQuery,
15214 constants.QR_GROUP: _GroupQuery,
15215 constants.QR_OS: _OsQuery,
15216 constants.QR_EXPORT: _ExportQuery,
15219 assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
15222 def _GetQueryImplementation(name):
15223   """Returns the implementation for a query type.
15225 @param name: Query type, must be one of L{constants.QR_VIA_OP}
15229 return _QUERY_IMPL[name]
15231 raise errors.OpPrereqError("Unknown query resource '%s'" % name,
15232 errors.ECODE_INVAL)
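# Hedged usage sketch (parameter names hypothetical): a query LU would first
# look up the implementation class registered in _QUERY_IMPL and then
# instantiate it with its filter and field list, e.g.
#
#   impl_cls = _GetQueryImplementation(constants.QR_NODE)   # -> _NodeQuery
#   impl = impl_cls(qfilter, fields, use_locking)
#
# An unknown resource name raises OpPrereqError with ECODE_INVAL.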